| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
| saurabh6790/aimobilize-lib-backup | refs/heads/master | webnotes/tests/test_fmt_money.py | 34 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import webnotes
from webnotes import _
from webnotes.utils import flt, cstr
def fmt_money(amount, precision=None):
"""
Convert to a string with commas for thousands, millions, etc.
"""
number_format = webnotes.conn.get_default("number_format") or "#,###.##"
decimal_str, comma_str, precision = get_number_format_info(number_format)
amount = '%.*f' % (precision, flt(amount))
if amount.find('.') == -1:
decimals = ''
else:
decimals = amount.split('.')[1]
parts = []
minus = ''
if flt(amount) < 0:
minus = '-'
amount = cstr(abs(flt(amount))).split('.')[0]
if len(amount) > 3:
parts.append(amount[-3:])
amount = amount[:-3]
val = 2 if number_format == "#,##,###.##" else 3
while len(amount) > val:
parts.append(amount[-val:])
amount = amount[:-val]
parts.append(amount)
parts.reverse()
amount = comma_str.join(parts) + (precision and (decimal_str + decimals) or "")
amount = minus + amount
return amount
def get_number_format_info(format):
if format=="#.###":
return "", ".", 0
elif format=="#,###":
return "", ",", 0
elif format=="#,###.##" or format=="#,##,###.##":
return ".", ",", 2
elif format=="#.###,##":
return ",", ".", 2
elif format=="# ###.##":
return ".", " ", 2
else:
return ".", ",", 2
import unittest
class TestFmtMoney(unittest.TestCase):
def test_standard(self):
webnotes.conn.set_default("number_format", "#,###.##")
self.assertEquals(fmt_money(100), "100.00")
self.assertEquals(fmt_money(1000), "1,000.00")
self.assertEquals(fmt_money(10000), "10,000.00")
self.assertEquals(fmt_money(100000), "100,000.00")
self.assertEquals(fmt_money(1000000), "1,000,000.00")
self.assertEquals(fmt_money(10000000), "10,000,000.00")
self.assertEquals(fmt_money(100000000), "100,000,000.00")
self.assertEquals(fmt_money(1000000000), "1,000,000,000.00")
def test_negative(self):
webnotes.conn.set_default("number_format", "#,###.##")
self.assertEquals(fmt_money(-100), "-100.00")
self.assertEquals(fmt_money(-1000), "-1,000.00")
self.assertEquals(fmt_money(-10000), "-10,000.00")
self.assertEquals(fmt_money(-100000), "-100,000.00")
self.assertEquals(fmt_money(-1000000), "-1,000,000.00")
self.assertEquals(fmt_money(-10000000), "-10,000,000.00")
self.assertEquals(fmt_money(-100000000), "-100,000,000.00")
self.assertEquals(fmt_money(-1000000000), "-1,000,000,000.00")
def test_decimal(self):
webnotes.conn.set_default("number_format", "#.###,##")
self.assertEquals(fmt_money(-100), "-100,00")
self.assertEquals(fmt_money(-1000), "-1.000,00")
self.assertEquals(fmt_money(-10000), "-10.000,00")
self.assertEquals(fmt_money(-100000), "-100.000,00")
self.assertEquals(fmt_money(-1000000), "-1.000.000,00")
self.assertEquals(fmt_money(-10000000), "-10.000.000,00")
self.assertEquals(fmt_money(-100000000), "-100.000.000,00")
self.assertEquals(fmt_money(-1000000000), "-1.000.000.000,00")
def test_lacs(self):
webnotes.conn.set_default("number_format", "#,##,###.##")
self.assertEquals(fmt_money(100), "100.00")
self.assertEquals(fmt_money(1000), "1,000.00")
self.assertEquals(fmt_money(10000), "10,000.00")
self.assertEquals(fmt_money(100000), "1,00,000.00")
self.assertEquals(fmt_money(1000000), "10,00,000.00")
self.assertEquals(fmt_money(10000000), "1,00,00,000.00")
self.assertEquals(fmt_money(100000000), "10,00,00,000.00")
self.assertEquals(fmt_money(1000000000), "1,00,00,00,000.00")
def test_no_precision(self):
webnotes.conn.set_default("number_format", "#,###")
self.assertEquals(fmt_money(0.3), "0")
self.assertEquals(fmt_money(100.3), "100")
self.assertEquals(fmt_money(1000.3), "1,000")
self.assertEquals(fmt_money(10000.3), "10,000")
self.assertEquals(fmt_money(-0.3), "0")
self.assertEquals(fmt_money(-100.3), "-100")
self.assertEquals(fmt_money(-1000.3), "-1,000")
if __name__=="__main__":
webnotes.connect()
unittest.main()
|
| mdavoodi/konkourse-python | refs/heads/master | manage.py | 1 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "konkourse.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
| silentfuzzle/calibre | refs/heads/master | src/chardet/sbcsgroupprober.py | 235 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from charsetgroupprober import CharSetGroupProber
from sbcharsetprober import SingleByteCharSetProber
from langcyrillicmodel import Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model
from langgreekmodel import Latin7GreekModel, Win1253GreekModel
from langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from langthaimodel import TIS620ThaiModel
from langhebrewmodel import Win1255HebrewModel
from hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, constants.True, hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber, visualHebrewProber])
self.reset()
|
| dalegregory/odoo | refs/heads/8.0 | addons/website_event_sale/controllers/main.py | 233 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website_event.controllers.main import website_event
from openerp.addons.website_sale.controllers.main import get_pricelist
from openerp.tools.translate import _
class website_event(website_event):
@http.route(['/event/<model("event.event"):event>/register'], type='http', auth="public", website=True)
def event_register(self, event, **post):
pricelist_id = int(get_pricelist())
values = {
'event': event.with_context(pricelist=pricelist_id),
'main_object': event.with_context(pricelist=pricelist_id),
'range': range,
}
return request.website.render("website_event.event_description_full", values)
@http.route(['/event/cart/update'], type='http', auth="public", methods=['POST'], website=True)
def cart_update(self, event_id, **post):
cr, uid, context = request.cr, request.uid, request.context
ticket_obj = request.registry.get('event.event.ticket')
sale = False
for key, value in post.items():
quantity = int(value or "0")
if not quantity:
continue
sale = True
ticket_id = key.split("-")[0] == 'ticket' and int(key.split("-")[1]) or None
ticket = ticket_obj.browse(cr, SUPERUSER_ID, ticket_id, context=context)
order = request.website.sale_get_order(force_create=1)
order.with_context(event_ticket_id=ticket.id)._cart_update(product_id=ticket.product_id.id, add_qty=quantity)
if not sale:
return request.redirect("/event/%s" % event_id)
return request.redirect("/shop/checkout")
def _add_event(self, event_name="New Event", context=None, **kwargs):
    context = context if context is not None else {}
try:
dummy, res_id = request.registry.get('ir.model.data').get_object_reference(request.cr, request.uid, 'event_sale', 'product_product_event')
context['default_event_ticket_ids'] = [[0,0,{
'name': _('Subscription'),
'product_id': res_id,
'deadline' : False,
'seats_max': 1000,
'price': 0,
}]]
except ValueError:
pass
return super(website_event, self)._add_event(event_name, context, **kwargs)
|
| loopCM/chromium | refs/heads/trunk | tools/telemetry/telemetry/page/page_measurement_value.py | 31 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import perf_tests_helper
def _Mean(l):
return float(sum(l)) / len(l) if len(l) > 0 else 0.0
class PageMeasurementValue(object):
def __init__(self, trace_name, units, value, chart_name, data_type):
self.trace_name = trace_name
self.units = units
self.value = value
self.chart_name = chart_name
self.data_type = data_type
@property
def measurement_name(self):
if self.chart_name:
return '%s.%s' % (self.chart_name, self.trace_name)
else:
return self.trace_name
@property
def output_value(self):
if 'histogram' in self.data_type:
(mean, _) = perf_tests_helper.GeomMeanAndStdDevFromHistogram(self.value)
return mean
elif isinstance(self.value, list):
return _Mean(self.value)
else:
return self.value
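# Hedged usage sketch (hypothetical trace/chart names; list values are
# averaged via _Mean above, scalars pass through unchanged).
def _demo_page_measurement_value():
    v = PageMeasurementValue('load_time', 'ms', [10.0, 20.0], 'timings', 'default')
    assert v.measurement_name == 'timings.load_time'
    assert v.output_value == 15.0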
|
| markrawlingson/SickRage | refs/heads/master | lib/chardet/euckrprober.py | 53 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKR_SM_MODEL
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
super(EUCKRProber, self).__init__()
self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
self.distribution_analyzer = EUCKRDistributionAnalysis()
self.reset()
@property
def charset_name(self):
return "EUC-KR"
|
| evernym/plenum | refs/heads/master | plenum/test/transactions/test_txn_general_access_utils.py | 2 |
import pytest
from plenum.common.constants import NYM, NODE
from plenum.common.txn_util import get_type, set_type, get_payload_data, \
get_from, get_req_id, get_seq_no, get_txn_id, \
get_txn_time, get_version, get_digest, get_protocol_version, get_payload_digest
from plenum.common.util import SortedDict
@pytest.fixture()
def txn():
return {
"reqSignature": {
"type": "ED25519",
"values": [{
"from": "6ouriXMZkLeHsuXrN1X1fd",
"value": "2DaRm3nt6H5fJu2TP5vxqbaDCtABPYmUTSX4ocnY8fVGgyJMVNaeh2z6JZhcW1gbmGKJcZopZMKZJwADuXFFJobM"
}]
},
"txn": {
"data": {
"type": NYM,
"something": "nothing",
},
"metadata": {
"from": "6ouriXMZkLeHsuXrN1X1fd",
"reqId": 1513945121191691,
"digest": "d3a6c519da23eacfc3e8dc3d3394fdb9ca1d8819bb9628f1fa6187c7e6dcf602",
"payloadDigest": "58232927bdccad16998a284e807a4e256d138a894c2bf41bbbf9db7cfab59c9c"
},
"protocolVersion": "2",
"type": "1",
},
"txnMetadata": {
"seqNo": 144,
"txnId": "aaaaa",
"txnTime": 1513945121,
},
"ver": "2"
}
@pytest.fixture()
def legacy_txn(txn):
result = txn
result["txn"]["metadata"] = {
"from": "6ouriXMZkLeHsuXrN1X1fd",
"reqId": 1513945121191691,
"digest": "58232927bdccad16998a284e807a4e256d138a894c2bf41bbbf9db7cfab59c9c"
}
return result
def test_get_type(txn):
assert get_type(txn) == NYM
def test_set_type(txn):
txn = set_type(txn, NODE)
assert get_type(txn) == NODE
def test_get_payload_data(txn):
expected_payload_data = SortedDict({
"type": NYM,
"something": "nothing",
})
assert SortedDict(get_payload_data(txn)) == expected_payload_data
def test_get_from(txn):
assert get_from(txn) == "6ouriXMZkLeHsuXrN1X1fd"
def test_get_from_none(txn):
txn["txn"]["metadata"].pop("from", None)
assert get_from(txn) is None
def test_get_req_id(txn):
assert get_req_id(txn) == 1513945121191691
def test_get_req_id_none(txn):
txn["txn"]["metadata"].pop("reqId", None)
assert get_req_id(txn) is None
def test_get_seq_no(txn):
assert get_seq_no(txn) == 144
def test_get_seq_no_none(txn):
txn["txnMetadata"].pop("seqNo", None)
assert get_seq_no(txn) is None
def test_get_txn_time(txn):
assert get_txn_time(txn) == 1513945121
def test_get_txn_time_none(txn):
txn["txnMetadata"].pop("txnTime", None)
assert get_txn_time(txn) is None
def test_get_txn_id(txn):
assert get_txn_id(txn) == "aaaaa"
def test_get_txn_id_none(txn):
txn["txnMetadata"].pop("txnId", None)
assert get_txn_id(txn) is None
def test_get_txn_version(txn):
assert get_version(txn) == "2"
def test_get_protocol_version(txn):
assert get_protocol_version(txn) == "2"
def test_get_digest(txn):
assert get_digest(txn) == "d3a6c519da23eacfc3e8dc3d3394fdb9ca1d8819bb9628f1fa6187c7e6dcf602"
def test_get_payload_digest(txn):
assert get_payload_digest(txn) == "58232927bdccad16998a284e807a4e256d138a894c2bf41bbbf9db7cfab59c9c"
def test_get_digest_old(legacy_txn):
assert get_digest(legacy_txn) is None
def test_get_payload_digest_old(legacy_txn):
assert get_payload_digest(legacy_txn) == "58232927bdccad16998a284e807a4e256d138a894c2bf41bbbf9db7cfab59c9c"
|
| awesome-labs/LFTimePicker | refs/heads/master | node_modules/markdown-to-json/node_modules/yaml-front-matter/node_modules/js-yaml/support/pyyaml-src/reader.py | 272 |
# This module contains abstractions for the input stream. You don't have to
# look further; there is no pretty code here.
#
# We define two classes here.
#
# Mark(source, line, column)
# It's just a record and its only use is producing nice error messages.
# Parser does not use it for any other purposes.
#
# Reader(source, data)
# Reader determines the encoding of `data` and converts it to unicode.
# Reader provides the following methods and attributes:
# reader.peek(index=0) - return the character `index` positions ahead.
# reader.forward(length=1) - move the current position `length` characters forward.
# reader.index - the number of the current character.
# reader.line, reader.column - the line and the column of the current character.
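# A hedged, minimal sketch of the Reader API described above (not part of
# the original module; call explicitly, nothing runs on import).
def _demo_reader():
    r = Reader(b"hello")       # bytes input: encoding auto-detected (utf-8 here)
    assert r.peek() == 'h'     # peek(index=0) looks at the current character
    r.forward(2)               # advance two characters, tracking line/column
    assert r.prefix(3) == 'llo'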
__all__ = ['Reader', 'ReaderError']
from .error import YAMLError, Mark
import codecs, re
class ReaderError(YAMLError):
def __init__(self, name, position, character, encoding, reason):
self.name = name
self.character = character
self.position = position
self.encoding = encoding
self.reason = reason
def __str__(self):
if isinstance(self.character, bytes):
return "'%s' codec can't decode byte #x%02x: %s\n" \
" in \"%s\", position %d" \
% (self.encoding, ord(self.character), self.reason,
self.name, self.position)
else:
return "unacceptable character #x%04x: %s\n" \
" in \"%s\", position %d" \
% (self.character, self.reason,
self.name, self.position)
class Reader(object):
# Reader:
# - determines the data encoding and converts it to a unicode string,
# - checks if characters are in allowed range,
# - adds '\0' to the end.
# Reader accepts
# - a `bytes` object,
# - a `str` object,
# - a file-like object with its `read` method returning `str`,
# - a file-like object with its `read` method returning `unicode`.
# Yeah, it's ugly and slow.
def __init__(self, stream):
self.name = None
self.stream = None
self.stream_pointer = 0
self.eof = True
self.buffer = ''
self.pointer = 0
self.raw_buffer = None
self.raw_decode = None
self.encoding = None
self.index = 0
self.line = 0
self.column = 0
if isinstance(stream, str):
self.name = "<unicode string>"
self.check_printable(stream)
self.buffer = stream+'\0'
elif isinstance(stream, bytes):
self.name = "<byte string>"
self.raw_buffer = stream
self.determine_encoding()
else:
self.stream = stream
self.name = getattr(stream, 'name', "<file>")
self.eof = False
self.raw_buffer = None
self.determine_encoding()
def peek(self, index=0):
try:
return self.buffer[self.pointer+index]
except IndexError:
self.update(index+1)
return self.buffer[self.pointer+index]
def prefix(self, length=1):
if self.pointer+length >= len(self.buffer):
self.update(length)
return self.buffer[self.pointer:self.pointer+length]
def forward(self, length=1):
if self.pointer+length+1 >= len(self.buffer):
self.update(length+1)
while length:
ch = self.buffer[self.pointer]
self.pointer += 1
self.index += 1
if ch in '\n\x85\u2028\u2029' \
or (ch == '\r' and self.buffer[self.pointer] != '\n'):
self.line += 1
self.column = 0
elif ch != '\uFEFF':
self.column += 1
length -= 1
def get_mark(self):
if self.stream is None:
return Mark(self.name, self.index, self.line, self.column,
self.buffer, self.pointer)
else:
return Mark(self.name, self.index, self.line, self.column,
None, None)
def determine_encoding(self):
while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
self.update_raw()
if isinstance(self.raw_buffer, bytes):
if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
self.raw_decode = codecs.utf_16_le_decode
self.encoding = 'utf-16-le'
elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
self.raw_decode = codecs.utf_16_be_decode
self.encoding = 'utf-16-be'
else:
self.raw_decode = codecs.utf_8_decode
self.encoding = 'utf-8'
self.update(1)
NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
def check_printable(self, data):
match = self.NON_PRINTABLE.search(data)
if match:
character = match.group()
position = self.index+(len(self.buffer)-self.pointer)+match.start()
raise ReaderError(self.name, position, ord(character),
'unicode', "special characters are not allowed")
def update(self, length):
if self.raw_buffer is None:
return
self.buffer = self.buffer[self.pointer:]
self.pointer = 0
while len(self.buffer) < length:
if not self.eof:
self.update_raw()
if self.raw_decode is not None:
try:
data, converted = self.raw_decode(self.raw_buffer,
'strict', self.eof)
except UnicodeDecodeError as exc:
character = self.raw_buffer[exc.start]
if self.stream is not None:
position = self.stream_pointer-len(self.raw_buffer)+exc.start
else:
position = exc.start
raise ReaderError(self.name, position, character,
exc.encoding, exc.reason)
else:
data = self.raw_buffer
converted = len(data)
self.check_printable(data)
self.buffer += data
self.raw_buffer = self.raw_buffer[converted:]
if self.eof:
self.buffer += '\0'
self.raw_buffer = None
break
def update_raw(self, size=4096):
data = self.stream.read(size)
if self.raw_buffer is None:
self.raw_buffer = data
else:
self.raw_buffer += data
self.stream_pointer += len(data)
if not data:
self.eof = True
#try:
# import psyco
# psyco.bind(Reader)
#except ImportError:
# pass
|
| ThiefMaster/indico | refs/heads/master | indico/modules/rb/operations/admin.py | 4 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import datetime, time
from indico.core.db import db
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.core.permissions import get_unified_permissions, update_principals_permissions
from indico.modules.rb.models.equipment import EquipmentType
from indico.modules.rb.models.map_areas import MapArea
from indico.modules.rb.models.room_bookable_hours import BookableHours
from indico.modules.rb.models.room_nonbookable_periods import NonBookablePeriod
@no_autoflush
def _populate_room(room, properties):
for prop, value in properties.items():
if prop not in ['available_equipment', 'bookable_hours', 'bookable_periods']:
setattr(room, prop, value)
def update_room_equipment(room, available_equipment_ids):
available_equipment = EquipmentType.query.filter(EquipmentType.id.in_(available_equipment_ids)).all()
room.available_equipment = available_equipment
db.session.flush()
def update_room_attributes(room, attributes):
current_attributes = {x.attribute.name for x in room.attributes}
new_attributes = {attribute['name'] for attribute in attributes}
deleted_attributes = current_attributes - new_attributes
for attribute in attributes:
room.set_attribute_value(attribute['name'], attribute['value'])
for deleted_attribute in deleted_attributes:
room.set_attribute_value(deleted_attribute, None)
db.session.flush()
def update_room_availability(room, availability):
if 'bookable_hours' in availability:
room.bookable_hours.order_by(False).delete()
unique_bh = {(hours['start_time'], hours['end_time']) for hours in availability['bookable_hours']}
db.session.add_all(
[BookableHours(room=room, start_time=hours[0], end_time=hours[1]) for hours in unique_bh])
if 'nonbookable_periods' in availability:
room.nonbookable_periods.order_by(False).delete()
unique_nbp = {(period['start_dt'], period['end_dt']) for period in availability['nonbookable_periods']}
db.session.add_all(
[NonBookablePeriod(room=room, start_dt=datetime.combine(period[0], time(0, 0)),
end_dt=datetime.combine(period[1], time(23, 59))) for period in unique_nbp])
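# A hedged sketch of the availability payload shape inferred from the
# lookups above (key names and value types are assumptions based on this
# function alone).
def _demo_availability_payload():
    from datetime import date
    return {
        'bookable_hours': [{'start_time': time(8, 0), 'end_time': time(18, 0)}],
        'nonbookable_periods': [{'start_dt': date(2021, 8, 1), 'end_dt': date(2021, 8, 15)}],
    }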
def update_room(room, args):
acl_entries = args.pop('acl_entries', None)
if acl_entries:
current = {e.principal: get_unified_permissions(e) for e in room.acl_entries}
update_principals_permissions(room, current, acl_entries)
_populate_room(room, args)
db.session.flush()
def create_area(bounds, name, default=False):
top, bottom = bounds['north_east'], bounds['south_west']
if default:
MapArea.query.update({MapArea.is_default: False}, synchronize_session='fetch')
new_area = MapArea()
new_area.name = name
new_area.is_default = default
new_area.top_left_latitude = top['lat']
new_area.top_left_longitude = top['lng']
new_area.bottom_right_latitude = bottom['lat']
new_area.bottom_right_longitude = bottom['lng']
db.session.add(new_area)
db.session.flush()
return new_area
def update_area(area_id, area_data):
top = area_data['bounds']['north_east']
bottom = area_data['bounds']['south_west']
map_area = MapArea.get_or_404(area_id)
if 'name' in area_data:
map_area.name = area_data['name']
if 'default' in area_data:
if area_data['default']:
MapArea.query.update({MapArea.is_default: False}, synchronize_session='fetch')
map_area.is_default = area_data['default']
map_area.top_left_latitude = top['lat']
map_area.top_left_longitude = top['lng']
map_area.bottom_right_latitude = bottom['lat']
map_area.bottom_right_longitude = bottom['lng']
db.session.flush()
def delete_areas(area_ids):
MapArea.query.filter(MapArea.id.in_(area_ids)).delete(synchronize_session='fetch')
db.session.flush()
|
| NickShaffner/rhea | refs/heads/master | rhea/utils/keep.py | 2 |
import myhdl
from myhdl import Signal, SignalType, intbv, always_comb, ConcatSignal
@myhdl.block
def keep_port_names(**ports):
""" touch the top-level ports so they are persevered """
gens, width, catsig = [], 0, None
# walk through all the ports
for name, port in ports.items():
if isinstance(port, (int, str, list, tuple,)):
pass
elif isinstance(port, SignalType):
width += len(port)
if catsig is None:
catsig = port
else:
catsig = ConcatSignal(catsig, port)
else:
g = keep_port_names(**vars(port))
gens.append(g)
if width > 0:
monsig = Signal(intbv(0)[width:])
@always_comb
def mon():
monsig.next = catsig
gens.append(mon)
return gens
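# A hedged usage sketch (signal names are illustrative only; as a
# myhdl.block, keep_port_names returns a block instance when called).
def _demo_keep_port_names():
    clk = Signal(bool(0))
    data = Signal(intbv(0)[8:])
    return keep_port_names(clk=clk, data=data)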
|
| bitifirefly/edx-platform | refs/heads/master | common/djangoapps/embargo/migrations/0004_migrate_embargo_config.py | 102 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Move the current course embargo configuration to the new models. """
for old_course in orm.EmbargoedCourse.objects.all():
new_course, __ = orm.RestrictedCourse.objects.get_or_create(course_key=old_course.course_id)
# Set the message keys to 'embargo'
new_course.enroll_msg_key = 'embargo'
new_course.access_msg_key = 'embargo'
new_course.save()
for country in self._embargoed_countries_list(orm):
country_model = orm.Country.objects.get(country=country)
orm.CountryAccessRule.objects.get_or_create(
country=country_model,
rule_type='blacklist',
restricted_course=new_course
)
def backwards(self, orm):
"""No backwards migration required since the forward migration is idempotent. """
pass
def _embargoed_countries_list(self, orm):
"""Retrieve the list of embargoed countries from the existing tables. """
# We need to replicate some application logic here, because South
# doesn't give us access to class methods on the Django model objects.
try:
current_config = orm.EmbargoedState.objects.order_by('-change_date')[0]
if current_config.enabled and current_config.embargoed_countries:
return [
country.strip().upper() for country
in current_config.embargoed_countries.split(',')
]
except IndexError:
pass
return []
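# Hedged illustration of the parsing above (hypothetical config value; codes
# are stripped and upper-cased exactly as in _embargoed_countries_list).
def _demo_parse_countries(self, raw="cu, ir , KP"):
    return [c.strip().upper() for c in raw.split(',')]  # -> ['CU', 'IR', 'KP']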
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'embargo.country': {
'Meta': {'ordering': "['country']", 'object_name': 'Country'},
'country': ('django_countries.fields.CountryField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.countryaccessrule': {
'Meta': {'unique_together': "(('restricted_course', 'country'),)", 'object_name': 'CountryAccessRule'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'restricted_course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.RestrictedCourse']"}),
'rule_type': ('django.db.models.fields.CharField', [], {'default': "'blacklist'", 'max_length': '255'})
},
'embargo.embargoedcourse': {
'Meta': {'object_name': 'EmbargoedCourse'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'embargoed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.embargoedstate': {
'Meta': {'object_name': 'EmbargoedState'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'embargoed_countries': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.ipfilter': {
'Meta': {'object_name': 'IPFilter'},
'blacklist': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'whitelist': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'embargo.restrictedcourse': {
'Meta': {'object_name': 'RestrictedCourse'},
'access_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'enroll_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['embargo']
symmetrical = True
|
| eneldoserrata/marcos_openerp | refs/heads/master | addons/report_geraldo/lib/geraldo/site/newsite/site-geraldo/django/db/models/fields/related.py | 13 |
from django.db import connection, transaction
from django.db.models import signals, get_model
from django.db.models.fields import AutoField, Field, IntegerField, PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist
from django.db.models.related import RelatedObject
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy, string_concat, ungettext, ugettext as _
from django.utils.functional import curry
from django.core import exceptions
from django import forms
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e. "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
model = get_model(app_label, model_name, False)
if model:
operation(field, model, cls)
else:
key = (app_label, model_name)
value = (cls, field, operation)
pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
#HACK
class RelatedField(object):
def contribute_to_class(self, cls, name):
sup = super(RelatedField, self)
# Add an accessor to allow easy determination of the related query path for this field
self.related_query_name = curry(self._get_related_query_name, cls._meta)
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name)
if not cls._meta.abstract and self.rel.related_name:
self.rel.related_name = self.rel.related_name % {'class': cls.__name__.lower()}
other = self.rel.to
if isinstance(other, basestring):
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
related = RelatedObject(other, cls, self)
if not cls._meta.abstract:
self.contribute_to_related_class(other, related)
def get_db_prep_lookup(self, lookup_type, value):
# If we are doing a lookup on a Related Field, we must be
# comparing object instances. The value should be the PK of value,
# not value itself.
def pk_trace(value):
# Value may be a primary key, or an object held in a relation.
# If it is an object, then we need to get the primary key value for
# that object. In certain conditions (especially one-to-one relations),
# the primary key may itself be an object - so we need to keep drilling
# down until we hit a value that can be used for a comparison.
v, field = value, None
try:
while True:
v, field = getattr(v, v._meta.pk.name), v._meta.pk
except AttributeError:
pass
if field:
if lookup_type in ('range', 'in'):
v = [v]
v = field.get_db_prep_lookup(lookup_type, v)
if isinstance(v, list):
v = v[0]
return v
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
return QueryWrapper(('(%s)' % sql), params)
# FIXME: lt and gt are explicitally allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt']:
return [pk_trace(value)]
if lookup_type in ('range', 'in'):
return [pk_trace(v) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError, "Related Field has invalid lookup: %s" % lookup_type
def _get_related_query_name(self, opts):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_name or opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = '_%s_cache' % related.get_accessor_name()
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
try:
return getattr(instance, self.cache_name)
except AttributeError:
params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
rel_obj = self.related.model._default_manager.get(**params)
setattr(instance, self.cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.related.get_accessor_name()))
elif value is not None and not isinstance(value, self.related.model):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.related.get_accessor_name(), self.related.opts.object_name))
# Set the value of the related field
setattr(value, self.related.field.rel.get_related_field().attname, instance)
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.field.name
cache_name = self.field.get_cache_name()
try:
return getattr(instance, cache_name)
except AttributeError:
val = getattr(instance, self.field.attname)
if val is None:
# If NULL is an allowed value, return it.
if self.field.null:
return None
raise self.field.rel.to.DoesNotExist
other_field = self.field.rel.get_related_field()
if other_field.rel:
params = {'%s__pk' % self.field.rel.field_name: val}
else:
params = {'%s__exact' % self.field.rel.field_name: val}
# If the related manager indicates that it should be used for
# related fields, respect that.
rel_mgr = self.field.rel.to._default_manager
if getattr(rel_mgr, 'use_for_related_fields', False):
rel_obj = rel_mgr.get(**params)
else:
rel_obj = QuerySet(self.field.rel.to).get(**params)
setattr(instance, cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self._field.name
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name))
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.field.name, self.field.rel.to._meta.object_name))
# Set the value of the related field
try:
val = getattr(value, self.field.rel.get_related_field().attname)
except AttributeError:
val = None
setattr(instance, self.field.attname, val)
# Since we already know what the related object is, seed the related
# object cache now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.field.get_cache_name(), value)
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
rel_field = self.related.field
rel_model = self.related.model
# Dynamically create a class that subclasses the related
# model's default manager.
superclass = self.related.model._default_manager.__class__
class RelatedManager(superclass):
def get_query_set(self):
return superclass.get_query_set(self).filter(**(self.core_filters))
def add(self, *objs):
for obj in objs:
setattr(obj, rel_field.name, instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update({rel_field.name: instance})
return super(RelatedManager, self).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
# Update kwargs with the related object that this
# ForeignRelatedObjectsDescriptor knows about.
kwargs.update({rel_field.name: instance})
return super(RelatedManager, self).get_or_create(**kwargs)
get_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs):
val = getattr(instance, rel_field.rel.get_related_field().attname)
for obj in objs:
# Is obj actually part of this descriptor set?
if getattr(obj, rel_field.attname) == val:
setattr(obj, rel_field.name, None)
obj.save()
else:
raise rel_field.rel.to.DoesNotExist, "%r is not related to %r." % (obj, instance)
remove.alters_data = True
def clear(self):
for obj in self.all():
setattr(obj, rel_field.name, None)
obj.save()
clear.alters_data = True
manager = RelatedManager()
attname = rel_field.rel.get_related_field().name
manager.core_filters = {'%s__%s' % (rel_field.name, attname):
getattr(instance, attname)}
manager.model = self.related.model
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
manager = self.__get__(instance)
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
def create_many_related_manager(superclass, through=False):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
class ManyRelatedManager(superclass):
def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
join_table=None, source_col_name=None, target_col_name=None):
super(ManyRelatedManager, self).__init__()
self.core_filters = core_filters
self.model = model
self.symmetrical = symmetrical
self.instance = instance
self.join_table = join_table
self.source_col_name = source_col_name
self.target_col_name = target_col_name
self.through = through
self._pk_val = self.instance._get_pk_val()
if self._pk_val is None:
raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % instance.__class__.__name__)
def get_query_set(self):
return superclass.get_query_set(self)._next_is_sticky().filter(**(self.core_filters))
# If the ManyToMany relation has an intermediary model,
# the add and remove methods do not exist.
if through is None:
def add(self, *objs):
self._add_items(self.source_col_name, self.target_col_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_col_name, self.source_col_name, *objs)
add.alters_data = True
def remove(self, *objs):
self._remove_items(self.source_col_name, self.target_col_name, *objs)
# If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
if self.symmetrical:
self._remove_items(self.target_col_name, self.source_col_name, *objs)
remove.alters_data = True
def clear(self):
self._clear_items(self.source_col_name)
# If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
if self.symmetrical:
self._clear_items(self.target_col_name)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if through is not None:
raise AttributeError, "Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
new_obj = super(ManyRelatedManager, self).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
obj, created = \
super(ManyRelatedManager, self).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def _add_items(self, source_col_name, target_col_name, *objs):
# join_table: name of the m2m link table
# source_col_name: the PK colname in join_table for the source object
# target_col_name: the PK colname in join_table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
new_ids.add(obj._get_pk_val())
else:
new_ids.add(obj)
# Add the newly created or already existing objects to the join table.
# First find out which items are already added, to avoid adding them twice
cursor = connection.cursor()
cursor.execute("SELECT %s FROM %s WHERE %s = %%s AND %s IN (%s)" % \
(target_col_name, self.join_table, source_col_name,
target_col_name, ",".join(['%s'] * len(new_ids))),
[self._pk_val] + list(new_ids))
existing_ids = set([row[0] for row in cursor.fetchall()])
# Add the ones that aren't there already
for obj_id in (new_ids - existing_ids):
cursor.execute("INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % \
(self.join_table, source_col_name, target_col_name),
[self._pk_val, obj_id])
transaction.commit_unless_managed()
def _remove_items(self, source_col_name, target_col_name, *objs):
# source_col_name: the PK colname in join_table for the source object
# target_col_name: the PK colname in join_table for the target object
# *objs - objects to remove
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
old_ids.add(obj._get_pk_val())
else:
old_ids.add(obj)
# Remove the specified objects from the join table
cursor = connection.cursor()
cursor.execute("DELETE FROM %s WHERE %s = %%s AND %s IN (%s)" % \
(self.join_table, source_col_name,
target_col_name, ",".join(['%s'] * len(old_ids))),
[self._pk_val] + list(old_ids))
transaction.commit_unless_managed()
def _clear_items(self, source_col_name):
# source_col_name: the PK colname in join_table for the source object
cursor = connection.cursor()
cursor.execute("DELETE FROM %s WHERE %s = %%s" % \
(self.join_table, source_col_name),
[self._pk_val])
transaction.commit_unless_managed()
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
# Dynamically create a class that subclasses the related
# model's default manager.
rel_model = self.related.model
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.related.field.rel.through)
qn = connection.ops.quote_name
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
instance=instance,
symmetrical=False,
join_table=qn(self.related.field.m2m_db_table()),
source_col_name=qn(self.related.field.m2m_reverse_name()),
target_col_name=qn(self.related.field.m2m_column_name())
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
through = getattr(self.related.field.rel, 'through', None)
if through is not None:
raise AttributeError, "Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
def __init__(self, m2m_field):
self.field = m2m_field
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
# Dynamically create a class that subclasses the related
# model's default manager.
rel_model=self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.field.rel.through)
qn = connection.ops.quote_name
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
instance=instance,
symmetrical=(self.field.rel.symmetrical and instance.__class__ == rel_model),
join_table=qn(self.field.m2m_db_table()),
source_col_name=qn(self.field.m2m_column_name()),
target_col_name=qn(self.field.m2m_reverse_name())
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
through = getattr(self.field.rel, 'through', None)
if through is not None:
raise AttributeError, "Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s's Manager instead." % through
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
class ManyToOneRel(object):
def __init__(self, to, field_name, related_name=None,
limit_choices_to=None, lookup_overrides=None, parent_link=False):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.to, self.field_name = to, field_name
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.lookup_overrides = lookup_overrides or {}
self.multiple = True
self.parent_link = parent_link
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
data = self.to._meta.get_field_by_name(self.field_name)
if not data[2]:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return data[0]
class OneToOneRel(ManyToOneRel):
def __init__(self, to, field_name, related_name=None,
limit_choices_to=None, lookup_overrides=None, parent_link=False):
super(OneToOneRel, self).__init__(to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
lookup_overrides=lookup_overrides, parent_link=parent_link)
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None):
self.to = to
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.symmetrical = symmetrical
self.multiple = True
self.through = through
class ForeignKey(RelatedField, Field):
empty_strings_allowed = False
def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
try:
to_name = to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
to_field = to_field or to._meta.pk.name
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = rel_class(to, to_field,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
lookup_overrides=kwargs.pop('lookup_overrides', None),
parent_link=kwargs.pop('parent_link', False))
Field.__init__(self, **kwargs)
self.db_index = True
def get_attname(self):
return '%s_id' % self.name
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.rel.get_related_field().attname)
return field_default
def get_db_prep_save(self, value):
if value == '' or value is None:
return None
else:
return self.rel.get_related_field().get_db_prep_save(value)
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_unicode(choice_list[1][0])
return Field.value_to_string(self, obj)
def contribute_to_class(self, cls, name):
super(ForeignKey, self).contribute_to_class(cls, name)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "o2m")
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
def formfield(self, **kwargs):
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.complex_filter(
self.rel.limit_choices_to),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
# However, if the database needs similar types for key fields, the only
# thing we can do is make the AutoField an IntegerField.
rel_field = self.rel.get_related_field()
if (isinstance(rel_field, AutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, (PositiveIntegerField,
PositiveSmallIntegerField)))):
return IntegerField().db_type()
return rel_field.db_type()
class OneToOneField(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
that it always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(),
SingleRelatedObjectDescriptor(related))
if not cls._meta.one_to_one_field:
cls._meta.one_to_one_field = self
def formfield(self, **kwargs):
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
class ManyToManyField(RelatedField, Field):
def __init__(self, to, **kwargs):
try:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(to,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', True),
through=kwargs.pop('through', None))
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
self.creates_table = False
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
else:
self.creates_table = True
Field.__init__(self, **kwargs)
msg = ugettext_lazy('Hold down "Control", or "Command" on a Mac, to select more than one.')
self.help_text = string_concat(self.help_text, ' ', msg)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through_model._meta.db_table
elif self.db_table:
return self.db_table
else:
return '%s_%s' % (opts.db_table, self.name)
def _get_m2m_column_name(self, related):
"Function that can be curried to provide the source column name for the m2m table"
try:
return self._m2m_column_name_cache
except AttributeError:
if self.rel.through is not None:
for f in self.rel.through_model._meta.fields:
if hasattr(f,'rel') and f.rel and f.rel.to == related.model:
self._m2m_column_name_cache = f.column
break
# If this is an m2m relation to self, avoid the inevitable name clash
elif related.model == related.parent_model:
self._m2m_column_name_cache = 'from_' + related.model._meta.object_name.lower() + '_id'
else:
self._m2m_column_name_cache = related.model._meta.object_name.lower() + '_id'
# Return the newly cached value
return self._m2m_column_name_cache
def _get_m2m_reverse_name(self, related):
"Function that can be curried to provide the related column name for the m2m table"
try:
return self._m2m_reverse_name_cache
except AttributeError:
if self.rel.through is not None:
found = False
for f in self.rel.through_model._meta.fields:
if hasattr(f,'rel') and f.rel and f.rel.to == related.parent_model:
if related.model == related.parent_model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
self._m2m_reverse_name_cache = f.column
break
else:
found = True
else:
self._m2m_reverse_name_cache = f.column
break
# If this is an m2m relation to self, avoid the inevitable name clash
elif related.model == related.parent_model:
self._m2m_reverse_name_cache = 'to_' + related.parent_model._meta.object_name.lower() + '_id'
else:
self._m2m_reverse_name_cache = related.parent_model._meta.object_name.lower() + '_id'
# Return the newly cached value
return self._m2m_reverse_name_cache
def isValidIDList(self, field_data, all_data):
"Validates that the value is a valid list of foreign keys"
mod = self.rel.to
try:
pks = map(int, field_data.split(','))
except ValueError:
# the CommaSeparatedIntegerField validator will catch this error
return
objects = mod._default_manager.in_bulk(pks)
if len(objects) != len(pks):
badkeys = [k for k in pks if k not in objects]
raise exceptions.ValidationError(
ungettext("Please enter valid %(self)s IDs. The value %(value)r is invalid.",
"Please enter valid %(self)s IDs. The values %(value)r are invalid.",
len(badkeys)) % {
'self': self.verbose_name,
'value': len(badkeys) == 1 and badkeys[0] or tuple(badkeys),
})
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_unicode(data)
def contribute_to_class(self, cls, name):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.rel.symmetrical and self.rel.to == "self" and self.rel.related_name is None:
self.rel.related_name = "%s_rel_+" % name
super(ManyToManyField, self).contribute_to_class(cls, name)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, basestring):
def resolve_through_model(field, model, cls):
field.rel.through_model = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
elif self.rel.through:
self.rel.through_model = self.rel.through
self.rel.through = self.rel.through._meta.object_name
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "m2m")
def contribute_to_related_class(self, cls, related):
# m2m relations to self do not have a ManyRelatedObjectsDescriptor,
# as it would be redundant - unless the field is non-symmetrical.
if related.model != related.parent_model or not self.rel.symmetrical:
# Add the descriptor for the m2m relation
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_column_name, related)
self.m2m_reverse_name = curry(self._get_m2m_reverse_name, related)
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ModelMultipleChoiceField, 'queryset': self.rel.to._default_manager.complex_filter(self.rel.limit_choices_to)}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
defaults['initial'] = [i._get_pk_val() for i in defaults['initial']]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
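# Illustrative sketch (not from the original module; the model names are
# hypothetical): how these relation fields are typically declared.
#
#     from django.db import models
#
#     class Author(models.Model):
#         name = models.CharField(max_length=100)
#
#     class Profile(models.Model):
#         author = models.OneToOneField(Author)       # reverse access returns one object
#
#     class Article(models.Model):
#         author = models.ForeignKey(Author)          # many-to-one
#         tags = models.ManyToManyField('Tag')        # many-to-many, lazy string reference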
|
georgelegrand/first_gog
|
refs/heads/master
|
xor.py
|
1
|
# Convert text to a bit string: encode to bytes, then zero-pad to whole bytes.
def text_to_bits(text, encoding='utf-8', errors='surrogatepass'):
bits = bin(int.from_bytes(text.encode(encoding, errors), 'big'))[2:]
return bits.zfill(8 * ((len(bits) + 7) // 8))
# Inverse of text_to_bits: parse the bit string as an integer and decode the bytes.
def text_from_bits(bits, encoding='utf-8', errors='surrogatepass'):
n = int(bits, 2)
return n.to_bytes((n.bit_length() + 7) // 8, 'big').decode(encoding, errors) or '\0'
# Repeat (and truncate) the key so it is at least as long as the message.
def key_check(msg2bits, key2bits):
if len(msg2bits) > len(key2bits):
n = len(msg2bits) // len(key2bits)
add = len(msg2bits) % len(key2bits)
new_key2bits = (key2bits * n) + key2bits[:add]
print("new_key2bits: ", new_key2bits)
print(len(msg2bits), len(new_key2bits))
return new_key2bits
else:
return key2bits
# XOR the message bits with the key bits; XOR is its own inverse, so the same
# function both encrypts and decrypts.
def gamma_xor(msg2bits, key2bits):
msg_num = int(msg2bits,2)
key_num = int(key2bits,2)
print(bin(msg_num ^ key_num))
res_xor = bin(msg_num ^ key_num)[2:]
print(res_xor)
print(text_from_bits(res_xor))
return text_from_bits(res_xor)
msg = input("Enter the message for encryption: ")
msg2bits = text_to_bits(msg)
print(type(msg2bits), msg2bits)
key = input("Enter the key: ")
key2bits = text_to_bits(key)
print(type(key2bits), key2bits)
key2bits = key_check(msg2bits,key2bits)
crypted_msg = gamma_xor(msg2bits, key2bits)
# Decrypt the ciphertext produced above by XOR-ing it with the same key
d_msg2bits = text_to_bits(crypted_msg)
#print(type(msg2bits), msg2bits)
decrypted_msg = gamma_xor(d_msg2bits, key2bits)
#print(text_to_bits('hello'))
#print(text_from_bits('110100001100101011011000110110001101111'))
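# Minimal non-interactive sketch (hypothetical values) mirroring the script's
# own round trip; XOR with the repeated key is its own inverse:
#
#     m = text_to_bits('hello')
#     k = key_check(m, text_to_bits('key'))
#     cipher = gamma_xor(m, k)                      # encrypt
#     plain = gamma_xor(text_to_bits(cipher), k)    # decrypt, as above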
|
marcosmodesto/django-testapp
|
refs/heads/master
|
django/django/contrib/gis/geometry/backend/__init__.py
|
388
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
geom_backend = getattr(settings, 'GEOMETRY_BACKEND', 'geos')
try:
module = import_module('.%s' % geom_backend, 'django.contrib.gis.geometry.backend')
except ImportError, e:
try:
module = import_module(geom_backend)
except ImportError, e_user:
raise ImproperlyConfigured('Could not import user-defined GEOMETRY_BACKEND '
'"%s".' % geom_backend)
try:
Geometry = module.Geometry
GeometryException = module.GeometryException
except AttributeError:
raise ImproperlyConfigured('Cannot import Geometry from the "%s" '
'geometry backend.' % geom_backend)
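# Illustrative sketch (assumption, not from the original module): the backend
# is selected in the project's settings; 'geos' is the default used above.
#
#     # settings.py
#     GEOMETRY_BACKEND = 'geos'  # or a dotted path to a user-defined module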
|
zlatnaspirala/visualjs
|
refs/heads/master
|
project/project_instance_webgl/res/blenderProjects/TOOLS/io_three/exporter/api/material.py
|
58
|
from bpy import data, types
from .. import constants, logger
from .constants import MULTIPLY, WIRE, IMAGE
def _material(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
material = None
if isinstance(name, types.Material):
material = name
elif name:
material = data.materials[name]
return func(material, *args, **kwargs) if material else None
return inner
@_material
def blending(material):
"""
:param material:
:return: THREE_blending_type value
"""
logger.debug("material.blending(%s)", material)
try:
blend = material.THREE_blending_type
except AttributeError:
logger.debug("No THREE_blending_type attribute found")
blend = constants.NORMAL_BLENDING
return blend
@_material
def bump_map(material):
"""
:param material:
:return: texture node for bump
"""
logger.debug("material.bump_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal and not \
texture.texture.use_normal_map:
return texture.texture
@_material
def bump_scale(material):
"""
:param material:
:rtype: float
"""
return normal_scale(material)
@_material
def depth_test(material):
"""
:param material:
:return: THREE_depth_test value
:rtype: bool
"""
logger.debug("material.depth_test(%s)", material)
try:
test = material.THREE_depth_test
except AttributeError:
logger.debug("No THREE_depth_test attribute found")
test = True
return test
@_material
def depth_write(material):
"""
:param material:
:return: THREE_depth_write value
:rtype: bool
"""
logger.debug("material.depth_write(%s)", material)
try:
write = material.THREE_depth_write
except AttributeError:
logger.debug("No THREE_depth_write attribute found")
write = True
return write
@_material
def double_sided(material):
"""
:param material:
:return: THREE_double_sided value
:rtype: bool
"""
logger.debug("material.double_sided(%s)", material)
try:
write = material.THREE_double_sided
except AttributeError:
logger.debug("No THREE_double_sided attribute found")
write = False
return write
@_material
def diffuse_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.diffuse_color(%s)", material)
return (material.diffuse_intensity * material.diffuse_color[0],
material.diffuse_intensity * material.diffuse_color[1],
material.diffuse_intensity * material.diffuse_color[2])
@_material
def diffuse_map(material):
"""
:param material:
:return: texture node for map
"""
logger.debug("material.diffuse_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_color_diffuse and not \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def emissive_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.emissive_color(%s)", material)
diffuse = diffuse_color(material)
return (material.emit * diffuse[0],
material.emit * diffuse[1],
material.emit * diffuse[2])
@_material
def light_map(material):
"""
:param material:
:return: texture node for light maps
"""
logger.debug("material.light_map(%s)", material)
for texture in _valid_textures(material, strict_use=False):
if texture.use_map_color_diffuse and \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def normal_scale(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.normal_scale(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal:
return (texture.normal_factor, texture.normal_factor)
@_material
def normal_map(material):
"""
:param material:
:return: texture node for normals
"""
logger.debug("material.normal_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal and \
texture.texture.use_normal_map:
return texture.texture
@_material
def opacity(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.opacity(%s)", material)
return round(material.alpha, 2)
@_material
def shading(material):
"""
:param material:
:return: shading type (phong or lambert)
"""
logger.debug("material.shading(%s)", material)
dispatch = {
True: constants.PHONG,
False: constants.LAMBERT
}
if material.use_shadeless:
return constants.BASIC
return dispatch[material.specular_intensity > 0.0]
@_material
def specular_coef(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.specular_coef(%s)", material)
return material.specular_hardness
@_material
def specular_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.specular_color(%s)", material)
return (material.specular_intensity * material.specular_color[0],
material.specular_intensity * material.specular_color[1],
material.specular_intensity * material.specular_color[2])
@_material
def specular_map(material):
"""
:param material:
:return: texture node for specular
"""
logger.debug("material.specular_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_specular:
return texture.texture
@_material
def transparent(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.transparent(%s)", material)
return material.use_transparency
@_material
def type(material):
"""
:param material:
:return: THREE compatible shader type
"""
logger.debug("material.type(%s)", material)
if material.diffuse_shader != 'LAMBERT':
material_type = constants.BASIC
elif material.specular_intensity > 0:
material_type = constants.PHONG
else:
material_type = constants.LAMBERT
return material_type
@_material
def use_vertex_colors(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.use_vertex_colors(%s)", material)
return material.use_vertex_color_paint
def used_materials():
"""
:return: list of materials that are in use
:rtype: generator
"""
logger.debug("material.used_materials()")
for material in data.materials:
if material.users > 0:
yield material.name
@_material
def visible(material):
"""
:param material:
:return: THREE_visible value
:rtype: bool
"""
logger.debug("material.visible(%s)", material)
try:
vis = material.THREE_visible
except AttributeError:
logger.debug("No THREE_visible attribute found")
vis = True
return vis
@_material
def wireframe(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.wireframe(%s)", material)
return material.type == WIRE
def _valid_textures(material, strict_use=True):
"""
:param material:
:rtype: generator
"""
for texture in material.texture_slots:
if not texture:
continue
if strict_use:
in_use = texture.use
else:
in_use = True
if not in_use:
continue
if not texture.texture or texture.texture.type != IMAGE:
logger.warning("Unable to export non-image texture %s", texture)
continue
logger.debug("Valid texture found %s", texture)
yield texture
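# Illustrative sketch (not from the original module; the import path and
# material name are hypothetical): thanks to the @_material decorator, every
# query accepts either a material name or a bpy.types.Material instance.
#
#     from io_three.exporter.api import material
#     material.diffuse_color('MyMaterial')     # looked up in bpy.data.materials
#     material.opacity(material_instance)      # an instance is passed through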
|
eonpatapon/nova
|
refs/heads/master
|
nova/compute/vm_mode.py
|
74
|
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible vm modes for instances.
Compute instance vm modes represent the host/guest ABI used for the
virtual machine / container. Individual hypervisors may support
multiple different vm modes per host. Available vm modes for a hypervisor
driver may also vary according to the architecture it is running on.
The 'vm_mode' parameter can be set against an instance to
choose what sort of VM to boot.
"""
from nova import exception
HVM = "hvm" # Native ABI (aka fully virtualized)
XEN = "xen" # Xen 3.0 paravirtualized
UML = "uml" # User Mode Linux paravirtualized
EXE = "exe" # Executables in containers
ALL = [HVM, XEN, UML, EXE]
def get_from_instance(instance):
"""Get the vm mode for an instance
:param instance: instance object to query
:returns: canonicalized vm mode for the instance
"""
mode = instance.vm_mode
return canonicalize(mode)
def is_valid(name):
"""Check if a string is a valid vm mode
:param name: vm mode name to validate
:returns: True if @name is valid
"""
return name in ALL
def canonicalize(mode):
"""Canonicalize the vm mode
:param mode: vm mode name to canonicalize
:returns: a canonical vm mode name
"""
if mode is None:
return None
mode = mode.lower()
# For compatibility with pre-Folsom deployments
if mode == "pv":
mode = XEN
if mode == "hv":
mode = HVM
if mode == "baremetal":
mode = HVM
if not is_valid(mode):
raise exception.InvalidVirtualMachineMode(vmmode=mode)
return mode
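# Illustrative sketch (not from the original module): legacy names are folded
# into canonical modes, and unknown names raise.
#
#     canonicalize('pv')         # -> 'xen'
#     canonicalize('HV')         # -> 'hvm' (lower-cased first)
#     canonicalize('baremetal')  # -> 'hvm'
#     canonicalize('bogus')      # raises InvalidVirtualMachineMode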
|
Vaibhav/Project-Euler
|
refs/heads/master
|
LeetCode/Medium/200-Num-of-Islands.py
|
2
|
'''
Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example 1:
Input:
11110
11010
11000
00000
Output: 1
Example 2:
Input:
11000
11000
00100
00011
Output: 3
'''
class Solution:
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
# Check if grid is empty
if not grid:
return 0
count = 0
rows = len(grid)
cols = len(grid[0])
for i in range(rows):
for j in range(cols):
if grid[i][j] == '1':
self.dfs(grid, i, j)
count += 1
# print(grid)
return count
def dfs(self, grid, i, j):
if i < 0 or j < 0 or i >= len(grid) or j >= len(grid[0]) or grid[i][j] != '1':
return
grid[i][j] = '#'
self.dfs(grid, i+1, j)
self.dfs(grid, i-1, j)
self.dfs(grid, i, j+1)
self.dfs(grid, i, j-1)
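# Quick sanity check (added sketch, not part of the original solution): the two
# grids from the problem statement above.
if __name__ == '__main__':
    grid1 = [list('11110'), list('11010'), list('11000'), list('00000')]
    grid2 = [list('11000'), list('11000'), list('00100'), list('00011')]
    assert Solution().numIslands(grid1) == 1
    assert Solution().numIslands(grid2) == 3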
|
crunchymalice/python-oauth2
|
refs/heads/master
|
oauth2/__init__.py
|
458
|
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str or if x is an iterable which
contains a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
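# Illustrative sketch (hypothetical credentials): a consumer is just the
# key/secret pair issued by the service provider at registration time.
#
#     consumer = Consumer(key='my-app-key', secret='my-app-secret')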
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
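# Illustrative sketch (hypothetical values): tokens round-trip through the
# urlencoded form used in OAuth responses.
#
#     token = Token(key='tok', secret='tok-secret')
#     s = token.to_string()        # e.g. 'oauth_token=tok&oauth_token_secret=tok-secret'
#     same = Token.from_string(s)  # recovers key and secret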
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
# Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
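# Illustrative sketch (hypothetical endpoint): building a request from consumer
# and token, signing it, and serializing it as an Authorization header.
#
#     req = Request.from_consumer_and_token(consumer, token=token,
#                                           http_method='GET',
#                                           http_url='http://example.com/photos')
#     req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
#     headers = req.to_header()    # {'Authorization': 'OAuth realm="", ...'}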
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body='', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = parse_qs(body)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
schema, rest = urllib.splittype(uri)
if rest.startswith('//'):
hierpart = '//'
else:
hierpart = ''
host, rest = urllib.splithost(rest)
realm = schema + ':' + hierpart + host
if is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
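# Illustrative sketch (hypothetical URL): Client signs each request
# transparently, so usage mirrors httplib2.Http.
#
#     client = Client(consumer, token)
#     resp, content = client.request('http://example.com/photos', 'GET')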
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise Error('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %d has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
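# Illustrative sketch (hypothetical objects): the provider-side check. Server
# only verifies signatures; storing and looking up consumers and tokens is the
# caller's job.
#
#     server = Server()
#     server.add_signature_method(SignatureMethod_HMAC_SHA1())
#     params = server.verify_request(request, consumer, token)  # raises Error on failure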
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
|
kemalakyol48/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/plat-sunos5/TYPES.py
|
108
|
# Generated by h2py from /usr/include/sys/types.h
# Included from sys/isa_defs.h
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 8
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_ALIGNMENT_REQUIRED = 1
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 4
_DOUBLE_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 4
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 4
_ALIGNMENT_REQUIRED = 0
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_ALIGNMENT_REQUIRED = 1
_LONG_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 8
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 8
_LONG_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
# Included from sys/feature_tests.h
_POSIX_C_SOURCE = 1
_LARGEFILE64_SOURCE = 1
_LARGEFILE_SOURCE = 1
_FILE_OFFSET_BITS = 64
_FILE_OFFSET_BITS = 32
_POSIX_C_SOURCE = 199506
_POSIX_PTHREAD_SEMANTICS = 1
_XOPEN_VERSION = 500
_XOPEN_VERSION = 4
_XOPEN_VERSION = 3
# Included from sys/machtypes.h
# Included from sys/inttypes.h
# Included from sys/int_types.h
# Included from sys/int_limits.h
INT8_MAX = (127)
INT16_MAX = (32767)
INT32_MAX = (2147483647)
INTMAX_MAX = INT32_MAX
INT_LEAST8_MAX = INT8_MAX
INT_LEAST16_MAX = INT16_MAX
INT_LEAST32_MAX = INT32_MAX
INT8_MIN = (-128)
INT16_MIN = (-32767-1)
INT32_MIN = (-2147483647-1)
INTMAX_MIN = INT32_MIN
INT_LEAST8_MIN = INT8_MIN
INT_LEAST16_MIN = INT16_MIN
INT_LEAST32_MIN = INT32_MIN
# Included from sys/int_const.h
def INT8_C(c): return (c)
def INT16_C(c): return (c)
def INT32_C(c): return (c)
def INT64_C(c): return __CONCAT__(c,l)
def INT64_C(c): return __CONCAT__(c,ll)
def UINT8_C(c): return __CONCAT__(c,u)
def UINT16_C(c): return __CONCAT__(c,u)
def UINT32_C(c): return __CONCAT__(c,u)
def UINT64_C(c): return __CONCAT__(c,ul)
def UINT64_C(c): return __CONCAT__(c,ull)
def INTMAX_C(c): return __CONCAT__(c,l)
def UINTMAX_C(c): return __CONCAT__(c,ul)
def INTMAX_C(c): return __CONCAT__(c,ll)
def UINTMAX_C(c): return __CONCAT__(c,ull)
def INTMAX_C(c): return (c)
def UINTMAX_C(c): return (c)
# Included from sys/int_fmtio.h
PRId8 = "d"
PRId16 = "d"
PRId32 = "d"
PRId64 = "ld"
PRId64 = "lld"
PRIdLEAST8 = "d"
PRIdLEAST16 = "d"
PRIdLEAST32 = "d"
PRIdLEAST64 = "ld"
PRIdLEAST64 = "lld"
PRIi8 = "i"
PRIi16 = "i"
PRIi32 = "i"
PRIi64 = "li"
PRIi64 = "lli"
PRIiLEAST8 = "i"
PRIiLEAST16 = "i"
PRIiLEAST32 = "i"
PRIiLEAST64 = "li"
PRIiLEAST64 = "lli"
PRIo8 = "o"
PRIo16 = "o"
PRIo32 = "o"
PRIo64 = "lo"
PRIo64 = "llo"
PRIoLEAST8 = "o"
PRIoLEAST16 = "o"
PRIoLEAST32 = "o"
PRIoLEAST64 = "lo"
PRIoLEAST64 = "llo"
PRIx8 = "x"
PRIx16 = "x"
PRIx32 = "x"
PRIx64 = "lx"
PRIx64 = "llx"
PRIxLEAST8 = "x"
PRIxLEAST16 = "x"
PRIxLEAST32 = "x"
PRIxLEAST64 = "lx"
PRIxLEAST64 = "llx"
PRIX8 = "X"
PRIX16 = "X"
PRIX32 = "X"
PRIX64 = "lX"
PRIX64 = "llX"
PRIXLEAST8 = "X"
PRIXLEAST16 = "X"
PRIXLEAST32 = "X"
PRIXLEAST64 = "lX"
PRIXLEAST64 = "llX"
PRIu8 = "u"
PRIu16 = "u"
PRIu32 = "u"
PRIu64 = "lu"
PRIu64 = "llu"
PRIuLEAST8 = "u"
PRIuLEAST16 = "u"
PRIuLEAST32 = "u"
PRIuLEAST64 = "lu"
PRIuLEAST64 = "llu"
SCNd16 = "hd"
SCNd32 = "d"
SCNd64 = "ld"
SCNd64 = "lld"
SCNi16 = "hi"
SCNi32 = "i"
SCNi64 = "li"
SCNi64 = "lli"
SCNo16 = "ho"
SCNo32 = "o"
SCNo64 = "lo"
SCNo64 = "llo"
SCNu16 = "hu"
SCNu32 = "u"
SCNu64 = "lu"
SCNu64 = "llu"
SCNx16 = "hx"
SCNx32 = "x"
SCNx64 = "lx"
SCNx64 = "llx"
PRIdMAX = "ld"
PRIoMAX = "lo"
PRIxMAX = "lx"
PRIuMAX = "lu"
PRIdMAX = "lld"
PRIoMAX = "llo"
PRIxMAX = "llx"
PRIuMAX = "llu"
PRIdMAX = "d"
PRIoMAX = "o"
PRIxMAX = "x"
PRIuMAX = "u"
SCNiMAX = "li"
SCNdMAX = "ld"
SCNoMAX = "lo"
SCNxMAX = "lx"
SCNiMAX = "lli"
SCNdMAX = "lld"
SCNoMAX = "llo"
SCNxMAX = "llx"
SCNiMAX = "i"
SCNdMAX = "d"
SCNoMAX = "o"
SCNxMAX = "x"
# Included from sys/types32.h
SHRT_MIN = (-32768)
SHRT_MAX = 32767
USHRT_MAX = 65535
INT_MIN = (-2147483647-1)
INT_MAX = 2147483647
LONG_MIN = (-9223372036854775807-1)
LONG_MAX = 9223372036854775807
LONG_MIN = (-2147483647-1)
LONG_MAX = 2147483647
P_MYID = (-1)
# Included from sys/select.h
# Included from sys/time.h
TIME32_MAX = INT32_MAX
TIME32_MIN = INT32_MIN
def TIMEVAL_OVERFLOW(tv): return None  # macro body lost in h2py translation
from TYPES import *
DST_NONE = 0
DST_USA = 1
DST_AUST = 2
DST_WET = 3
DST_MET = 4
DST_EET = 5
DST_CAN = 6
DST_GB = 7
DST_RUM = 8
DST_TUR = 9
DST_AUSTALT = 10
ITIMER_REAL = 0
ITIMER_VIRTUAL = 1
ITIMER_PROF = 2
ITIMER_REALPROF = 3
def ITIMERVAL_OVERFLOW(itv): return None  # macro body lost in h2py translation
SEC = 1
MILLISEC = 1000
MICROSEC = 1000000
NANOSEC = 1000000000
# Included from sys/time_impl.h
def TIMESPEC_OVERFLOW(ts): return None  # macro body lost in h2py translation
def ITIMERSPEC_OVERFLOW(it): return None  # macro body lost in h2py translation
__CLOCK_REALTIME0 = 0
CLOCK_VIRTUAL = 1
CLOCK_PROF = 2
__CLOCK_REALTIME3 = 3
CLOCK_HIGHRES = 4
CLOCK_MAX = 5
CLOCK_REALTIME = __CLOCK_REALTIME3
CLOCK_REALTIME = __CLOCK_REALTIME0
TIMER_RELTIME = 0x0
TIMER_ABSTIME = 0x1
# Included from sys/mutex.h
from TYPES import *
def MUTEX_HELD(x): return (mutex_owned(x))
def TICK_TO_SEC(tick): return ((tick) / hz)
def SEC_TO_TICK(sec): return ((sec) * hz)
def TICK_TO_MSEC(tick): return None  # macro body lost in h2py translation
def MSEC_TO_TICK(msec): return None  # macro body lost in h2py translation
def MSEC_TO_TICK_ROUNDUP(msec): return None  # macro body lost in h2py translation
def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
def USEC_TO_TICK_ROUNDUP(usec): return None  # macro body lost in h2py translation
def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
def NSEC_TO_TICK_ROUNDUP(nsec): return None  # macro body lost in h2py translation
def TIMEVAL_TO_TICK(tvp): return None  # macro body lost in h2py translation
def TIMESTRUC_TO_TICK(tsp): return None  # macro body lost in h2py translation
# Included from time.h
from TYPES import *
# Included from iso/time_iso.h
NULL = 0
NULL = 0
CLOCKS_PER_SEC = 1000000
FD_SETSIZE = 65536
FD_SETSIZE = 1024
_NBBY = 8
NBBY = _NBBY
def FD_ZERO(p): return bzero((p), sizeof (*(p)))
|
wzyy2/RTTdev
|
refs/heads/master
|
bsp/simulator/rtconfig.py
|
1
|
import os
# toolchains options
ARCH='sim'
# CROSS_TOOL: 'msvc', 'gcc' or 'mingw'
# 'msvc' and 'mingw' are both for Windows; 'gcc' is for Linux
CROSS_TOOL='mingw'
# CROSS_TOOL selects the cross compiler
# EXEC_PATH is the path to the compiler executables
if CROSS_TOOL == 'gcc' or CROSS_TOOL == 'clang-analyze':
CPU = 'posix'
PLATFORM = 'gcc'
EXEC_PATH = ''
elif CROSS_TOOL == 'mingw':
CPU = 'win32'
PLATFORM = 'mingw'
EXEC_PATH = r'D:\Program Files (x86)\CodeBlocks\MinGW\bin'
elif CROSS_TOOL == 'msvc':
CPU = 'win32'
PLATFORM = 'cl'
EXEC_PATH = ''
else:
print "bad CROSS TOOL!"
exit(1)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
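# Illustrative sketch (hypothetical paths): the toolchain location can be
# overridden per shell session instead of editing this file, e.g.
#   set RTT_EXEC_PATH=D:\MinGW\bin      (Windows, mingw)
#   export RTT_EXEC_PATH=/usr/bin       (Linux, gcc)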
BUILD = 'debug'
#BUILD = ''
if PLATFORM == 'gcc':
# toolchains
PREFIX = ''
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -ffunction-sections -fdata-sections'
DEVICE = ' '
CFLAGS = DEVICE + ' -I/usr/include -w -D_REENTRANT'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
#LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-linux.map -lpthread'
LFLAGS = DEVICE + ' -Wl,-Map=rtthread-linux.map -pthread -T gcc.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -g -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = ''
elif PLATFORM == 'mingw':
# toolchains
PREFIX = ''
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'exe'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -ffunction-sections -fdata-sections'
DEVICE = ' '
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
DEFFILE_LFLAGS = DEVICE + ' -Wl,-Map=rtthread-win32.map,--output-def,rtthread.def -T mingw.ld '
LFLAGS = DEVICE + ' -Wl,-Map=rtthread-win32.map -T mingw.ld '
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -g -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = ''
elif PLATFORM == 'cl':
# toolchains
PREFIX = ''
TARGET_EXT = 'exe'
AS = PREFIX + 'cl'
CC = PREFIX + 'cl'
AR = PREFIX + 'cl'
LINK = PREFIX + 'cl'
AFLAGS = ''
CFLAGS = ''
LFLAGS = ''
if BUILD == 'debug':
CFLAGS += ' /MTd'
LFLAGS += ' /DEBUG'
else:
CFLAGS += ' /MT'
LFLAGS += ''
CFLAGS += ' /ZI /Od /W 3 /WL '
LFLAGS += ' /SUBSYSTEM:CONSOLE /MACHINE:X86 '
CPATH = ''
LPATH = ''
POST_ACTION = ''
|
michaelBenin/django-oscar
|
refs/heads/master
|
tests/integration/catalogue/product_class_tests.py
|
70
|
from django.test import TestCase
from oscar.apps.catalogue import models
class TestProductClassModel(TestCase):
def test_slug_is_auto_created(self):
books = models.ProductClass.objects.create(
name="Book",
)
self.assertEqual('book', books.slug)
def test_has_attribute_for_whether_shipping_is_required(self):
models.ProductClass.objects.create(
name="Download",
requires_shipping=False,
)
|
Cinntax/home-assistant
|
refs/heads/dev
|
tests/components/generic/test_camera.py
|
4
|
"""The tests for generic camera component."""
import asyncio
from unittest import mock
from homeassistant.setup import async_setup_component
@asyncio.coroutine
def test_fetching_url(aioclient_mock, hass, hass_client):
"""Test that it fetches the given url."""
aioclient_mock.get("http://example.com", text="hello world")
yield from async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "http://example.com",
"username": "user",
"password": "pass",
}
},
)
client = yield from hass_client()
resp = yield from client.get("/api/camera_proxy/camera.config_test")
assert resp.status == 200
assert aioclient_mock.call_count == 1
body = yield from resp.text()
assert body == "hello world"
resp = yield from client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 2
@asyncio.coroutine
def test_fetching_without_verify_ssl(aioclient_mock, hass, hass_client):
"""Test that it fetches the given url when ssl verify is off."""
aioclient_mock.get("https://example.com", text="hello world")
yield from async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "https://example.com",
"username": "user",
"password": "pass",
"verify_ssl": "false",
}
},
)
client = yield from hass_client()
resp = yield from client.get("/api/camera_proxy/camera.config_test")
assert resp.status == 200
@asyncio.coroutine
def test_fetching_url_with_verify_ssl(aioclient_mock, hass, hass_client):
"""Test that it fetches the given url when ssl verify is explicitly on."""
aioclient_mock.get("https://example.com", text="hello world")
yield from async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": "https://example.com",
"username": "user",
"password": "pass",
"verify_ssl": "true",
}
},
)
client = yield from hass_client()
resp = yield from client.get("/api/camera_proxy/camera.config_test")
assert resp.status == 200
@asyncio.coroutine
def test_limit_refetch(aioclient_mock, hass, hass_client):
"""Test that it fetches the given url."""
aioclient_mock.get("http://example.com/5a", text="hello world")
aioclient_mock.get("http://example.com/10a", text="hello world")
aioclient_mock.get("http://example.com/15a", text="hello planet")
aioclient_mock.get("http://example.com/20a", status=404)
yield from async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "generic",
"still_image_url": 'http://example.com/{{ states.sensor.temp.state + "a" }}',
"limit_refetch_to_url_change": True,
}
},
)
client = yield from hass_client()
resp = yield from client.get("/api/camera_proxy/camera.config_test")
hass.states.async_set("sensor.temp", "5")
with mock.patch("async_timeout.timeout", side_effect=asyncio.TimeoutError()):
resp = yield from client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 0
assert resp.status == 500
hass.states.async_set("sensor.temp", "10")
resp = yield from client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 1
assert resp.status == 200
body = yield from resp.text()
assert body == "hello world"
resp = yield from client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 1
assert resp.status == 200
body = yield from resp.text()
assert body == "hello world"
hass.states.async_set("sensor.temp", "15")
# Url change = fetch new image
resp = yield from client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 2
assert resp.status == 200
body = yield from resp.text()
assert body == "hello planet"
# Cause a template render error
hass.states.async_remove("sensor.temp")
resp = yield from client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 2
assert resp.status == 200
body = yield from resp.text()
assert body == "hello planet"
@asyncio.coroutine
def test_camera_content_type(aioclient_mock, hass, hass_client):
"""Test generic camera with custom content_type."""
svg_image = "<some image>"
urlsvg = "https://upload.wikimedia.org/wikipedia/commons/0/02/SVG_logo.svg"
aioclient_mock.get(urlsvg, text=svg_image)
cam_config_svg = {
"name": "config_test_svg",
"platform": "generic",
"still_image_url": urlsvg,
"content_type": "image/svg+xml",
}
cam_config_normal = cam_config_svg.copy()
cam_config_normal.pop("content_type")
cam_config_normal["name"] = "config_test_jpg"
yield from async_setup_component(
hass, "camera", {"camera": [cam_config_svg, cam_config_normal]}
)
client = yield from hass_client()
resp_1 = yield from client.get("/api/camera_proxy/camera.config_test_svg")
assert aioclient_mock.call_count == 1
assert resp_1.status == 200
assert resp_1.content_type == "image/svg+xml"
body = yield from resp_1.text()
assert body == svg_image
resp_2 = yield from client.get("/api/camera_proxy/camera.config_test_jpg")
assert aioclient_mock.call_count == 2
assert resp_2.status == 200
assert resp_2.content_type == "image/jpeg"
body = yield from resp_2.text()
assert body == svg_image
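# A hedged aside (not part of the upstream tests): the config dicts used by
# these fixtures correspond to a configuration.yaml stanza roughly like the
# following (key names taken from the fixtures above, values illustrative):
#
#   camera:
#     - platform: generic
#       name: config_test
#       still_image_url: http://example.com
#       limit_refetch_to_url_change: true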
|
vincepandolfo/django
|
refs/heads/master
|
tests/utils_tests/test_tree.py
|
429
|
import copy
import unittest
from django.utils.tree import Node
class NodeTests(unittest.TestCase):
def setUp(self):
self.node1_children = [('a', 1), ('b', 2)]
self.node1 = Node(self.node1_children)
self.node2 = Node()
def test_str(self):
self.assertEqual(str(self.node1), "(DEFAULT: ('a', 1), ('b', 2))")
self.assertEqual(str(self.node2), "(DEFAULT: )")
def test_repr(self):
self.assertEqual(repr(self.node1),
"<Node: (DEFAULT: ('a', 1), ('b', 2))>")
self.assertEqual(repr(self.node2), "<Node: (DEFAULT: )>")
def test_len(self):
self.assertEqual(len(self.node1), 2)
self.assertEqual(len(self.node2), 0)
def test_bool(self):
self.assertTrue(self.node1)
self.assertFalse(self.node2)
def test_contains(self):
self.assertIn(('a', 1), self.node1)
self.assertNotIn(('a', 1), self.node2)
def test_add(self):
# start with the same children of node1 then add an item
node3 = Node(self.node1_children)
node3_added_child = ('c', 3)
# add() returns the added data
self.assertEqual(node3.add(node3_added_child, Node.default),
node3_added_child)
# we added exactly one item, len() should reflect that
self.assertEqual(len(self.node1) + 1, len(node3))
self.assertEqual(str(node3), "(DEFAULT: ('a', 1), ('b', 2), ('c', 3))")
def test_negate(self):
# negated is False by default
self.assertFalse(self.node1.negated)
self.node1.negate()
self.assertTrue(self.node1.negated)
self.node1.negate()
self.assertFalse(self.node1.negated)
def test_deepcopy(self):
node4 = copy.copy(self.node1)
node5 = copy.deepcopy(self.node1)
self.assertIs(self.node1.children, node4.children)
self.assertIsNot(self.node1.children, node5.children)
|
IEMLdev/propositions-restful-server
|
refs/heads/master
|
ieml/test/dictionary/test_dictionary.py
|
2
|
import unittest
from ieml.dictionary.dictionary import Dictionary
import numpy as np
from ieml.dictionary.script import Script
from ieml.ieml_database import IEMLDatabase, GitInterface
class DictionaryTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.d = IEMLDatabase(folder=GitInterface().folder).get_dictionary()
def test_scripts(self):
self.assertIsInstance(self.d.scripts, np.ndarray)
self.assertEqual(self.d.scripts.ndim, 1)
self.assertEqual(self.d.scripts.shape, (len(self.d),))
for s in self.d.scripts:
self.assertIsInstance(s, Script)
#
# def test_one_hot(self):
# for i, s in enumerate(self.d.scripts):
# oh = self.d.one_hot(s)
#
# self.assertIsInstance(oh, np.ndarray)
# self.assertEqual(oh.ndim, 1)
# self.assertEqual(oh.shape, (len(self.d),))
# self.assertEqual(oh.dtype, int)
#
# self.assertTrue(all(e == 0 for j, e in enumerate(oh) if j != i))
# # print(oh[i-2:i+2], s)
#
# self.assertEqual(oh[i], 1)
|
diorcety/intellij-community
|
refs/heads/master
|
python/testData/formatter/alignDictLiteralOnColon.py
|
79
|
{
"a": 1,
"bbb": [
2
],
"bbbbb": 3
}
|
SuriyaaKudoIsc/olympia
|
refs/heads/master
|
migrations/659-award-theme-points.py
|
113
|
#!/usr/bin/env python
def run():
return
|
actionpods/django-action-hub
|
refs/heads/master
|
hub/urls/users.py
|
1
|
from django.conf.urls import url, include
from hub.views import user
urlpatterns = [
url(r'^$', user.index, name="index"),
url(r'^(?P<user>[0-9]+)/$', user.detail, name="detail"),
]
|
mdaniel/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyRelativeImportInspection/PlainDirectoryDottedImportFromDotTwoElementsWithAs/plainDirectory/script.py
|
10
|
<weak_warning descr="Relative import outside of a package">from . import foo, bar as b</weak_warning>
|
StephenWeber/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/webfaction/webfaction_app.py
|
20
|
#!/usr/bin/python
#
# Create a Webfaction application using Ansible and the Webfaction API
#
# Valid application types can be found by looking here:
# http://docs.webfaction.com/xmlrpc-api/apps.html#application-types
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
# * Andy Baker
# * Federico Tarantini
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: webfaction_app
short_description: Add or remove applications on a Webfaction host
description:
- Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the application
required: true
state:
description:
- Whether the application should exist
required: false
choices: ['present', 'absent']
default: "present"
type:
description:
- The type of application to create. See the Webfaction docs at http://docs.webfaction.com/xmlrpc-api/apps.html for a list.
required: true
autostart:
description:
- Whether the app should restart with an autostart.cgi script
required: false
default: "no"
extra_info:
description:
- Any extra parameters required by the app
required: false
default: null
port_open:
description:
- If the port should be opened
required: false
default: false
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
machine:
description:
- The machine name to use (optional for accounts with only one machine)
required: false
'''
EXAMPLES = '''
- name: Create a test app
webfaction_app:
name="my_wsgi_app1"
state=present
type=mod_wsgi35-python27
login_name={{webfaction_user}}
login_password={{webfaction_passwd}}
machine={{webfaction_machine}}
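# A hedged companion example (not in the original module docs): removing the
# same app. Per the notes above, consider adding "serial: 1" when the play
# targets more than one host.
- name: Remove the test app
webfaction_app:
name="my_wsgi_app1"
state=absent
login_name={{webfaction_user}}
login_password={{webfaction_passwd}}
machine={{webfaction_machine}}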
'''
import xmlrpclib
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(required=False, choices=['present', 'absent'], default='present'),
type = dict(required=True),
autostart = dict(required=False, type='bool', default=False),
extra_info = dict(required=False, default=""),
port_open = dict(required=False, type='bool', default=False),
login_name = dict(required=True),
login_password = dict(required=True, no_log=True),
machine = dict(required=False, default=False),
),
supports_check_mode=True
)
app_name = module.params['name']
app_type = module.params['type']
app_state = module.params['state']
if module.params['machine']:
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password'],
module.params['machine']
)
else:
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
app_list = webfaction.list_apps(session_id)
app_map = dict([(i['name'], i) for i in app_list])
existing_app = app_map.get(app_name)
result = {}
# Here's where the real stuff happens
if app_state == 'present':
# Does an app with this name already exist?
if existing_app:
if existing_app['type'] != app_type:
module.fail_json(msg="App already exists with different type. Please fix by hand.")
# If it exists with the right type, we don't change it
# Should check other parameters.
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, create the app
result.update(
webfaction.create_app(
session_id, app_name, app_type,
module.boolean(module.params['autostart']),
module.params['extra_info'],
module.boolean(module.params['port_open'])
)
)
elif app_state == 'absent':
# If the app's already not there, nothing changed.
if not existing_app:
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, delete the app
result.update(
webfaction.delete_app(session_id, app_name)
)
else:
module.fail_json(msg="Unknown state specified: {}".format(app_state))
module.exit_json(
changed = True,
result = result
)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
dulems/hue
|
refs/heads/master
|
apps/impala/src/impala/forms.py
|
1198
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
wenxer/empire-agent
|
refs/heads/master
|
empire_agent/core/__init__.py
|
1
|
# -*- coding: utf-8 -*-
__all__ = ['produce']
|
mingderwang/angr
|
refs/heads/master
|
tests/test_project_resolve_simproc.py
|
10
|
import nose
import angr
import os
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
bina = os.path.join(test_location, "x86_64/test_project_resolve_simproc")
"""
We voluntarily don't use SimProcedures for 'rand' and 'sleep' because we want
to step into their lib code.
"""
def test_bina():
p = angr.Project(bina, exclude_sim_procedures_list=['rand', 'sleep'], load_options={"auto_load_libs":True})
# Make sure external functions are not replaced with a SimProcedure
sleep_jmpslot = p.loader.main_bin.jmprel['sleep']
rand_jmpslot = p.loader.main_bin.jmprel['rand']
read_jmpslot = p.loader.main_bin.jmprel['read']
sleep_addr = p.loader.memory.read_addr_at(sleep_jmpslot.addr)
rand_addr = p.loader.memory.read_addr_at(rand_jmpslot.addr)
read_addr = p.loader.memory.read_addr_at(read_jmpslot.addr)
libc_sleep_addr = p.loader.shared_objects['libc.so.6'].get_symbol('sleep').rebased_addr
libc_rand_addr = p.loader.shared_objects['libc.so.6'].get_symbol('rand').rebased_addr
nose.tools.assert_equal(sleep_addr, libc_sleep_addr)
nose.tools.assert_equal(rand_addr, libc_rand_addr)
nose.tools.assert_true(p.is_hooked(read_addr))
nose.tools.assert_true("libc___so___6.read.read" in
p._sim_procedures[read_addr].__str__())
if __name__ == '__main__':
test_bina()
|
hj3938/zulip
|
refs/heads/master
|
zerver/views/__init__.py
|
42
|
from __future__ import absolute_import
from django.conf import settings
from django.contrib.auth import authenticate, login, get_backends
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext, loader
from django.utils.timezone import now
from django.utils.cache import patch_cache_control
from django.core.exceptions import ValidationError
from django.core import validators
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.db.models import Q, F
from django.forms.models import model_to_dict
from django.core.mail import send_mail
from django.middleware.csrf import get_token
from django.db import transaction
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmAlias, \
RealmFilter, bulk_get_recipients, \
PreregistrationUser, get_client, MitUser, UserActivity, PushDeviceToken, \
get_stream, bulk_get_streams, UserPresence, \
get_recipient, valid_stream_name, \
split_email_to_domain, resolve_email_to_domain, email_to_username, get_realm, \
completely_open, get_unique_open_realm, get_active_user_dicts_in_realm, remote_user_to_email
from zerver.lib.actions import bulk_remove_subscriptions, do_change_password, \
do_change_full_name, do_change_enable_desktop_notifications, do_change_is_admin, \
do_change_enter_sends, do_change_enable_sounds, do_activate_user, do_create_user, \
do_change_subscription_property, internal_send_message, \
create_stream_if_needed, gather_subscriptions, subscribed_to_stream, \
update_user_presence, bulk_add_subscriptions, do_events_register, \
get_status_dict, do_change_enable_offline_email_notifications, \
do_change_enable_digest_emails, do_set_realm_name, do_set_realm_restricted_to_domain, do_set_realm_invite_required, do_set_realm_invite_by_admins_only, internal_prep_message, \
do_send_messages, get_default_subs, do_deactivate_user, do_reactivate_user, \
user_email_is_unique, do_invite_users, do_refer_friend, compute_mit_user_fullname, \
do_add_alert_words, do_remove_alert_words, do_set_alert_words, get_subscriber_emails, \
do_set_muted_topics, do_rename_stream, clear_followup_emails_queue, \
do_change_enable_offline_push_notifications, \
do_deactivate_stream, do_change_autoscroll_forever, do_make_stream_public, \
do_add_default_stream, do_change_default_all_public_streams, \
do_change_default_desktop_notifications, \
do_change_default_events_register_stream, do_change_default_sending_stream, \
do_change_enable_stream_desktop_notifications, do_change_enable_stream_sounds, \
do_change_stream_description, do_get_streams, do_make_stream_private, \
do_regenerate_api_key, do_remove_default_stream, do_update_pointer, \
do_change_avatar_source, do_change_twenty_four_hour_time, do_change_left_side_userlist
from zerver.lib.create_user import random_api_key
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.forms import RegistrationForm, HomepageForm, ToSForm, \
CreateUserForm, is_inactive, OurAuthenticationForm
from django.views.decorators.csrf import csrf_exempt
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib import bugdown
from zerver.lib.alert_words import user_alert_words
from zerver.lib.validator import check_string, check_list, check_dict, \
check_int, check_bool, check_variable_type
from zerver.decorator import require_post, \
authenticated_api_view, authenticated_json_post_view, \
has_request_variables, authenticated_json_view, to_non_negative_int, \
JsonableError, get_user_profile_by_email, REQ, require_realm_admin, \
RequestVariableConversionError
from zerver.lib.avatar import avatar_url, get_avatar_url
from zerver.lib.upload import upload_message_image_through_web_client, upload_avatar_image, \
get_signed_upload_url, get_realm_for_filename
from zerver.lib.response import json_success, json_error, json_response
from zerver.lib.unminify import SourceMap
from zerver.lib.queue import queue_json_publish
from zerver.lib.utils import statsd, generate_random_token, statsd_key
from zproject.backends import password_auth_enabled, dev_auth_enabled
from confirmation.models import Confirmation
import requests
import subprocess
import calendar
import datetime
import ujson
import simplejson
import re
import urllib
import base64
import time
import logging
import os
import jwt
import hashlib
import hmac
from collections import defaultdict
from zerver.lib.rest import rest_dispatch as _rest_dispatch
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
def list_to_streams(streams_raw, user_profile, autocreate=False, invite_only=False):
"""Converts plaintext stream names to a list of Streams, validating input in the process
For each stream name, we validate it to ensure it meets our
requirements for a proper stream name: that is, that it is shorter
than Stream.MAX_NAME_LENGTH characters and passes
valid_stream_name.
This function in autocreate mode should be atomic: either an exception will be raised
during a precheck, or all the streams specified will have been created if applicable.
@param streams_raw The list of stream names to process
@param user_profile The user for whom we are retrieving the streams
@param autocreate Whether we should create streams if they don't already exist
@param invite_only Whether newly created streams should have the invite_only bit set
"""
existing_streams = []
created_streams = []
# Validate all streams, getting extant ones, then get-or-creating the rest.
stream_set = set(stream_name.strip() for stream_name in streams_raw)
rejects = []
for stream_name in stream_set:
if len(stream_name) > Stream.MAX_NAME_LENGTH:
raise JsonableError("Stream name (%s) too long." % (stream_name,))
if not valid_stream_name(stream_name):
raise JsonableError("Invalid stream name (%s)." % (stream_name,))
existing_stream_map = bulk_get_streams(user_profile.realm, stream_set)
for stream_name in stream_set:
stream = existing_stream_map.get(stream_name.lower())
if stream is None:
rejects.append(stream_name)
else:
existing_streams.append(stream)
if autocreate:
for stream_name in rejects:
stream, created = create_stream_if_needed(user_profile.realm,
stream_name,
invite_only=invite_only)
if created:
created_streams.append(stream)
else:
existing_streams.append(stream)
elif rejects:
raise JsonableError("Stream(s) (%s) do not exist" % ", ".join(rejects))
return existing_streams, created_streams
def realm_user_count(realm):
user_dicts = get_active_user_dicts_in_realm(realm)
return len([user_dict for user_dict in user_dicts if not user_dict["is_bot"]])
def send_signup_message(sender, signups_stream, user_profile,
internal=False, realm=None):
if internal:
# When this is done using manage.py vs. the web interface
internal_blurb = " **INTERNAL SIGNUP** "
else:
internal_blurb = " "
user_count = realm_user_count(user_profile.realm)
# Send notification to realm notifications stream if it exists
# Don't send notification for the first user in a realm
if user_profile.realm.notifications_stream is not None and user_count > 1:
internal_send_message(sender, "stream",
user_profile.realm.notifications_stream.name,
"New users", "%s just signed up for Zulip. Say hello!" % \
(user_profile.full_name,),
realm=user_profile.realm)
internal_send_message(sender,
"stream", signups_stream, user_profile.realm.domain,
"%s <`%s`> just signed up for Zulip!%s(total: **%i**)" % (
user_profile.full_name,
user_profile.email,
internal_blurb,
user_count,
)
)
def notify_new_user(user_profile, internal=False):
if settings.NEW_USER_BOT is not None:
send_signup_message(settings.NEW_USER_BOT, "signups", user_profile, internal)
statsd.gauge("users.signups.%s" % (user_profile.realm.domain.replace('.', '_')), 1, delta=True)
class PrincipalError(JsonableError):
def __init__(self, principal):
self.principal = principal
def to_json_error_msg(self):
return ("User not authorized to execute queries on behalf of '%s'"
% (self.principal,))
def principal_to_user_profile(agent, principal):
principal_doesnt_exist = False
try:
principal_user_profile = get_user_profile_by_email(principal)
except UserProfile.DoesNotExist:
principal_doesnt_exist = True
if (principal_doesnt_exist
or agent.realm != principal_user_profile.realm):
# We have to make sure we don't leak information about which users
# are registered for Zulip in a different realm. We could do
# something a little more clever and check the domain part of the
# principal to maybe give a better error message
raise PrincipalError(principal)
return principal_user_profile
def name_changes_disabled(realm):
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
@require_post
def accounts_register(request):
key = request.POST['key']
confirmation = Confirmation.objects.get(confirmation_key=key)
prereg_user = confirmation.content_object
email = prereg_user.email
mit_beta_user = isinstance(confirmation.content_object, MitUser)
try:
existing_user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
existing_user_profile = None
validators.validate_email(email)
# If someone invited you, you are joining their realm regardless
# of your e-mail address.
#
# MitUsers can't be referred and don't have a referred_by field.
if not mit_beta_user and prereg_user.referred_by:
realm = prereg_user.referred_by.realm
domain = realm.domain
if realm.restricted_to_domain and domain != resolve_email_to_domain(email):
return render_to_response("zerver/closed_realm.html", {"closed_domain_name": realm.name})
elif not mit_beta_user and prereg_user.realm:
# You have a realm set, even though nobody referred you. This
# happens if you sign up through a special URL for an open
# realm.
domain = prereg_user.realm.domain
else:
domain = resolve_email_to_domain(email)
realm = get_realm(domain)
if realm and realm.deactivated:
# The user is trying to register for a deactivated realm. Advise them to
# contact support.
return render_to_response("zerver/deactivated.html",
{"deactivated_domain_name": realm.name})
try:
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
# Mirror dummy users to be activated must be inactive
is_inactive(email)
else:
# Other users should not already exist at all.
user_email_is_unique(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.quote_plus(email))
name_validated = False
full_name = None
if request.POST.get('from_confirmation'):
try:
del request.session['authenticated_full_name']
except KeyError:
pass
if domain == "mit.edu":
hesiod_name = compute_mit_user_fullname(email)
form = RegistrationForm(
initial={'full_name': hesiod_name if "@" not in hesiod_name else ""})
name_validated = True
elif settings.POPULATE_PROFILE_VIA_LDAP:
for backend in get_backends():
if isinstance(backend, LDAPBackend):
ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
try:
request.session['authenticated_full_name'] = ldap_attrs[settings.AUTH_LDAP_USER_ATTR_MAP['full_name']][0]
name_validated = True
# We don't use initial= here, because if the form is
# complete (that is, no additional fields need to be
# filled out by the user) we want the form to validate,
# so they can be directly registered without having to
# go through this interstitial.
form = RegistrationForm(
{'full_name': request.session['authenticated_full_name']})
# FIXME: This will result in the user getting
# validation errors if they have to enter a password.
# Not relevant for ONLY_SSO, though.
break
except TypeError:
# Let the user fill out a name and/or try another backend
form = RegistrationForm()
elif 'full_name' in request.POST:
form = RegistrationForm(
initial={'full_name': request.POST.get('full_name')}
)
else:
form = RegistrationForm()
else:
postdata = request.POST.copy()
if name_changes_disabled(realm):
# If we populate profile information via LDAP and we have a
# verified name from you on file, use that. Otherwise, fall
# back to the full name in the request.
try:
postdata.update({'full_name': request.session['authenticated_full_name']})
name_validated = True
except KeyError:
pass
form = RegistrationForm(postdata)
if not password_auth_enabled(realm):
form['password'].field.required = False
if form.is_valid():
if password_auth_enabled(realm):
password = form.cleaned_data['password']
else:
# SSO users don't need passwords
password = None
full_name = form.cleaned_data['full_name']
short_name = email_to_username(email)
first_in_realm = len(UserProfile.objects.filter(realm=realm, is_bot=False)) == 0
# FIXME: sanitize email addresses and fullname
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
try:
user_profile = existing_user_profile
do_activate_user(user_profile)
do_change_password(user_profile, password)
do_change_full_name(user_profile, full_name)
except UserProfile.DoesNotExist:
user_profile = do_create_user(email, password, realm, full_name, short_name)
else:
user_profile = do_create_user(email, password, realm, full_name, short_name)
process_new_human_user(user_profile, prereg_user=prereg_user,
newsletter_data={"IP": request.META['REMOTE_ADDR']})
# This logs you in using the ZulipDummyBackend, since honestly nothing
# more fancy than this is required.
login(request, authenticate(username=user_profile.email, use_dummy_backend=True))
if first_in_realm:
do_change_is_admin(user_profile, True)
return HttpResponseRedirect(reverse('zerver.views.initial_invite_page'))
else:
return HttpResponseRedirect(reverse('zerver.views.home'))
return render_to_response('zerver/register.html',
{'form': form,
'company_name': domain,
'email': email,
'key': key,
'full_name': request.session.get('authenticated_full_name', None),
'lock_name': name_validated and name_changes_disabled(realm),
# password_auth_enabled is normally set via our context processor,
# but for the registration form, there is no logged in user yet, so
# we have to set it here.
'password_auth_enabled': password_auth_enabled(realm),
},
context_instance=RequestContext(request))
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * subscribe the user to newsletter if newsletter_data is specified
def process_new_human_user(user_profile, prereg_user=None, newsletter_data=None):
mit_beta_user = user_profile.realm.domain == "mit.edu"
try:
streams = prereg_user.streams.all()
except AttributeError:
# This will catch both the case where prereg_user is None and where it
# is a MitUser.
streams = []
# If the user's invitation didn't explicitly list some streams, we
# add the default streams
if len(streams) == 0:
streams = get_default_subs(user_profile)
bulk_add_subscriptions(streams, [user_profile])
# Give you the last 100 messages on your streams, so you have
# something to look at in your home view once you finish the
# tutorial.
one_week_ago = now() - datetime.timedelta(weeks=1)
recipients = Recipient.objects.filter(type=Recipient.STREAM,
type_id__in=[stream.id for stream in streams])
messages = Message.objects.filter(recipient_id__in=recipients, pub_date__gt=one_week_ago).order_by("-id")[0:100]
if len(messages) > 0:
ums_to_create = [UserMessage(user_profile=user_profile, message=message,
flags=UserMessage.flags.read)
for message in messages]
UserMessage.objects.bulk_create(ums_to_create)
# mit_beta_users don't have a referred_by field
if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None \
and settings.NOTIFICATION_BOT is not None:
# This is a cross-realm private message.
internal_send_message(settings.NOTIFICATION_BOT,
"private", prereg_user.referred_by.email, user_profile.realm.domain,
"%s <`%s`> accepted your invitation to join Zulip!" % (
user_profile.full_name,
user_profile.email,
)
)
# Mark any other PreregistrationUsers that are STATUS_ACTIVE as
# inactive so we can keep track of the PreregistrationUser we
# actually used for analytics
if prereg_user is not None:
PreregistrationUser.objects.filter(email__iexact=user_profile.email).exclude(
id=prereg_user.id).update(status=0)
else:
PreregistrationUser.objects.filter(email__iexact=user_profile.email).update(status=0)
notify_new_user(user_profile)
if newsletter_data is not None:
# If the user was created automatically via the API, we may
# not want to register them for the newsletter
queue_json_publish(
"signups",
{
'EMAIL': user_profile.email,
'merge_vars': {
'NAME': user_profile.full_name,
'REALM': user_profile.realm.domain,
'OPTIN_IP': newsletter_data["IP"],
'OPTIN_TIME': datetime.datetime.isoformat(datetime.datetime.now()),
},
},
lambda event: None)
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
def accounts_accept_terms(request):
email = request.user.email
domain = resolve_email_to_domain(email)
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
full_name = form.cleaned_data['full_name']
send_mail('Terms acceptance for ' + full_name,
loader.render_to_string('zerver/tos_accept_body.txt',
{'name': full_name,
'email': email,
'ip': request.META['REMOTE_ADDR'],
'browser': request.META['HTTP_USER_AGENT']}),
settings.EMAIL_HOST_USER,
["all@zulip.com"])
do_change_full_name(request.user, full_name)
return redirect(home)
else:
form = ToSForm()
return render_to_response('zerver/accounts_accept_terms.html',
{ 'form': form, 'company_name': domain, 'email': email },
context_instance=RequestContext(request))
from zerver.lib.ccache import make_ccache
@authenticated_json_view
@has_request_variables
def webathena_kerberos_login(request, user_profile,
cred=REQ(default=None)):
if cred is None:
return json_error("Could not find Kerberos credential")
if not user_profile.realm.domain == "mit.edu":
return json_error("Webathena login only for mit.edu realm")
try:
parsed_cred = ujson.loads(cred)
user = parsed_cred["cname"]["nameString"][0]
if user == "golem":
# Hack for an mit.edu user whose Kerberos username doesn't
# match what he zephyrs as
user = "ctl"
assert(user == user_profile.email.split("@")[0])
ccache = make_ccache(parsed_cred)
except Exception:
return json_error("Invalid Kerberos cache")
# TODO: Send these data via (say) rabbitmq
try:
subprocess.check_call(["ssh", "zulip@zmirror2.zulip.net", "--",
"/home/zulip/zulip/bots/process_ccache",
user,
user_profile.api_key,
base64.b64encode(ccache)])
except Exception:
logging.exception("Error updating the user's ccache")
return json_error("We were unable to setup mirroring for you")
return json_success()
def api_endpoint_docs(request):
with open('templates/zerver/api_content.json', 'r') as api_content:
raw_calls = api_content.read()
calls = ujson.loads(raw_calls)
langs = set()
for call in calls:
call["endpoint"] = "%s/v1/%s" % (settings.EXTERNAL_API_URI, call["endpoint"])
call["example_request"]["curl"] = call["example_request"]["curl"].replace("https://api.zulip.com", settings.EXTERNAL_API_URI)
response = call['example_response']
if '\n' not in response:
# For 1-line responses, pretty-print them
extended_response = response.replace(", ", ",\n ")
else:
extended_response = response
call['rendered_response'] = bugdown.convert("~~~ .py\n" + extended_response + "\n~~~\n", "default")
for example_type in ('request', 'response'):
for lang in call.get('example_' + example_type, []):
langs.add(lang)
return render_to_response(
'zerver/api_endpoints.html', {
'content': calls,
'langs': langs,
},
context_instance=RequestContext(request))
@authenticated_json_post_view
@has_request_variables
def json_invite_users(request, user_profile, invitee_emails=REQ):
if not invitee_emails:
return json_error("You must specify at least one email address.")
invitee_emails = set(re.split(r'[, \n]', invitee_emails))
stream_names = request.POST.getlist('stream')
if not stream_names:
return json_error("You must specify at least one stream for invitees to join.")
# We unconditionally sub you to the notifications stream if it
# exists and is public.
notifications_stream = user_profile.realm.notifications_stream
if notifications_stream and not notifications_stream.invite_only:
stream_names.append(notifications_stream.name)
streams = []
for stream_name in stream_names:
stream = get_stream(stream_name, user_profile.realm)
if stream is None:
return json_error("Stream does not exist: %s. No invites were sent." % stream_name)
streams.append(stream)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
return json_success()
def create_homepage_form(request, user_info=None):
if user_info:
return HomepageForm(user_info, domain=request.session.get("domain"))
# An empty fields dict is not treated the same way as not
# providing it.
return HomepageForm(domain=request.session.get("domain"))
def maybe_send_to_registration(request, email, full_name=''):
form = create_homepage_form(request, user_info={'email': email})
request.verified_email = None
if form.is_valid():
# Construct a PreregistrationUser object and send the user over to
# the confirmation view.
prereg_user = None
if settings.ONLY_SSO:
try:
prereg_user = PreregistrationUser.objects.filter(email__iexact=email).latest("invited_at")
except PreregistrationUser.DoesNotExist:
prereg_user = create_preregistration_user(email, request)
else:
prereg_user = create_preregistration_user(email, request)
return redirect("".join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
"/",
# Split this so we only get the part after the /
Confirmation.objects.get_link_for_object(prereg_user).split("/", 3)[3],
'?full_name=',
# urllib does not handle Unicode, so coerce to an encoded byte string
# Explanation: http://stackoverflow.com/a/5605354/90777
urllib.quote_plus(full_name.encode('utf8')))))
else:
return render_to_response('zerver/accounts_home.html', {'form': form},
context_instance=RequestContext(request))
def login_or_register_remote_user(request, remote_username, user_profile, full_name=''):
if user_profile is None or user_profile.is_mirror_dummy:
# Since execution has reached here, the client specified a remote user
# but no associated user account exists. Send them over to the
# PreregistrationUser flow.
return maybe_send_to_registration(request, remote_user_to_email(remote_username), full_name)
else:
login(request, user_profile)
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
def remote_user_sso(request):
try:
remote_user = request.META["REMOTE_USER"]
except KeyError:
raise JsonableError("No REMOTE_USER set.")
user_profile = authenticate(remote_user=remote_user)
return login_or_register_remote_user(request, remote_user, user_profile)
@csrf_exempt
def remote_user_jwt(request):
try:
json_web_token = request.POST["json_web_token"]
payload, signing_input, header, signature = jwt.load(json_web_token)
except KeyError:
raise JsonableError("No JSON web token passed in request")
except jwt.DecodeError:
raise JsonableError("Bad JSON web token")
remote_user = payload.get("user", None)
if remote_user is None:
raise JsonableError("No user specified in JSON web token claims")
domain = payload.get('realm', None)
if domain is None:
raise JsonableError("No domain specified in JSON web token claims")
email = "%s@%s" % (remote_user, domain)
try:
jwt.verify_signature(payload, signing_input, header, signature,
settings.JWT_AUTH_KEYS[domain])
# We do all the authentication we need here (otherwise we'd have to
# duplicate work), but we need to call authenticate with some backend so
# that the request.backend attribute gets set.
user_profile = authenticate(username=email, use_dummy_backend=True)
except (jwt.DecodeError, jwt.ExpiredSignature):
raise JsonableError("Bad JSON web token signature")
except KeyError:
raise JsonableError("Realm not authorized for JWT login")
except UserProfile.DoesNotExist:
user_profile = None
return login_or_register_remote_user(request, email, user_profile, remote_user)
def google_oauth2_csrf(request, value):
return hmac.new(get_token(request).encode('utf-8'), value, hashlib.sha256).hexdigest()
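# Hedged illustration (not in the original source): the state parameter built
# in start_google_oauth2 below has the shape "<unix-time>:<hex hmac>", e.g.
# (values hypothetical) "1400000000:9f2b...e1", where the digest is keyed on
# the per-session CSRF token so a forged callback cannot supply a matching pair.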
def start_google_oauth2(request):
uri = 'https://accounts.google.com/o/oauth2/auth?'
cur_time = str(int(time.time()))
csrf_state = '{}:{}'.format(
cur_time,
google_oauth2_csrf(request, cur_time),
)
params = {
'response_type': 'code',
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.finish_google_oauth2'),
)),
'scope': 'profile email',
'state': csrf_state,
}
return redirect(uri + urllib.urlencode(params))
def finish_google_oauth2(request):
error = request.GET.get('error')
if error == 'access_denied':
return redirect('/')
elif error is not None:
logging.error('Error from google oauth2 login %r', request.GET)
return HttpResponse(status=400)
value, hmac_value = request.GET.get('state').split(':')
if hmac_value != google_oauth2_csrf(request, value):
raise Exception('Google oauth2 CSRF error')
resp = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'code': request.GET.get('code'),
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.finish_google_oauth2'),
)),
'grant_type': 'authorization_code',
},
)
if resp.status_code != 200:
raise Exception('Could not convert google oauth2 code to access_token\r%r' % resp.text)
access_token = resp.json()['access_token']
resp = requests.get(
'https://www.googleapis.com/plus/v1/people/me',
params={'access_token': access_token}
)
if resp.status_code != 200:
raise Exception('Google login failed making API call\r%r' % resp.text)
body = resp.json()
try:
full_name = body['name']['formatted']
except KeyError:
# Only google+ users have a formatted name. I am ignoring i18n here.
full_name = u'{} {}'.format(
body['name']['givenName'], body['name']['familyName']
)
for email in body['emails']:
if email['type'] == 'account':
break
else:
raise Exception('Google oauth2 account email not found %r' % body)
email_address = email['value']
user_profile = authenticate(username=email_address, use_dummy_backend=True)
return login_or_register_remote_user(request, email_address, user_profile, full_name)
def login_page(request, **kwargs):
extra_context = kwargs.pop('extra_context',{})
if dev_auth_enabled():
users = UserProfile.objects.filter(is_bot=False, is_active=True)
extra_context['direct_admins'] = sorted([u.email for u in users if u.is_admin()])
extra_context['direct_users'] = sorted([u.email for u in users if not u.is_admin()])
template_response = django_login_page(
request, authentication_form=OurAuthenticationForm,
extra_context=extra_context, **kwargs)
try:
template_response.context_data['email'] = request.GET['email']
except KeyError:
pass
return template_response
def dev_direct_login(request, **kwargs):
# This function allows logging in without a password and should only be called in development environments.
# It may be called if the DevAuthBackend is included in settings.AUTHENTICATION_BACKENDS
if (not dev_auth_enabled()) or settings.PRODUCTION:
# This check is probably not required, since authenticate would fail without an enabled DevAuthBackend.
raise Exception('Direct login not supported.')
email = request.POST['direct_email']
user_profile = authenticate(username=email)
login(request, user_profile)
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
@authenticated_json_post_view
@has_request_variables
def json_bulk_invite_users(request, user_profile,
invitee_emails=REQ(validator=check_list(check_string))):
invitee_emails = set(invitee_emails)
streams = get_default_subs(user_profile)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
# Report bulk invites to internal Zulip.
invited = PreregistrationUser.objects.filter(referred_by=user_profile)
internal_message = "%s <`%s`> invited %d people to Zulip." % (
user_profile.full_name, user_profile.email, invited.count())
internal_send_message(settings.NEW_USER_BOT, "stream", "signups",
user_profile.realm.domain, internal_message)
return json_success()
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
def initial_invite_page(request):
user = request.user
# Only show the bulk-invite page for the first user in a realm
user_count = UserProfile.objects.filter(realm=user.realm).count()
if user_count > 1:
return redirect('zerver.views.home')
params = {'company_name': user.realm.domain}
if (user.realm.restricted_to_domain):
params['invite_suffix'] = user.realm.domain
return render_to_response('zerver/initial_invite_page.html', params,
context_instance=RequestContext(request))
@require_post
def logout_then_login(request, **kwargs):
return django_logout_then_login(request, **kwargs)
def create_preregistration_user(email, request):
domain = request.session.get("domain")
if completely_open(domain):
# Clear the "domain" from the session object; it's no longer needed
request.session["domain"] = None
# The user is trying to sign up for a completely open realm,
# so create them a PreregistrationUser for that realm
return PreregistrationUser.objects.create(email=email,
realm=get_realm(domain))
# MIT users who are not explicitly signing up for an open realm
# require special handling (They may already have an (inactive)
# account, for example)
if split_email_to_domain(email) == "mit.edu":
return MitUser.objects.get_or_create(email=email)[0]
return PreregistrationUser.objects.create(email=email)
def accounts_home_with_domain(request, domain):
if completely_open(domain):
# You can sign up for a completely open realm through a
# special registration path that contains the domain in the
# URL. We store this information in the session rather than
# elsewhere because we don't have control over URL or form
# data for folks registering through OpenID.
request.session["domain"] = domain
return accounts_home(request)
else:
return HttpResponseRedirect(reverse('zerver.views.accounts_home'))
def send_registration_completion_email(email, request):
"""
Send an email with a confirmation link to the provided e-mail so the user
can complete their registration.
"""
prereg_user = create_preregistration_user(email, request)
context = {'support_email': settings.ZULIP_ADMINISTRATOR,
'voyager': settings.VOYAGER}
Confirmation.objects.send_confirmation(prereg_user, email,
additional_context=context)
def accounts_home(request):
# First we populate request.session with a domain if
# there is a single realm, which is open.
# This is then used in HomepageForm and in creating a PreregistrationUser
unique_realm = get_unique_open_realm()
if unique_realm:
request.session['domain'] = unique_realm.domain
if request.method == 'POST':
form = create_homepage_form(request, user_info=request.POST)
if form.is_valid():
email = form.cleaned_data['email']
send_registration_completion_email(email, request)
return HttpResponseRedirect(reverse('send_confirm', kwargs={'email': email}))
try:
email = request.POST['email']
# Note: We don't check for uniqueness
is_inactive(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.quote_plus(email))
else:
form = create_homepage_form(request)
return render_to_response('zerver/accounts_home.html',
{'form': form, 'current_url': request.get_full_path},
context_instance=RequestContext(request))
def approximate_unread_count(user_profile):
not_in_home_view_recipients = [sub.recipient.id for sub in \
Subscription.objects.filter(
user_profile=user_profile, in_home_view=False)]
muted_topics = ujson.loads(user_profile.muted_topics)
# If muted_topics is empty, it looks like []. If it is non-empty, it look
# like [[u'devel', u'test']]. We should switch to a consistent envelope, but
# until we do we still have both in the database.
if muted_topics:
muted_topics = muted_topics[0]
return UserMessage.objects.filter(
user_profile=user_profile, message_id__gt=user_profile.pointer).exclude(
message__recipient__type=Recipient.STREAM,
message__recipient__id__in=not_in_home_view_recipients).exclude(
message__subject__in=muted_topics).exclude(
flags=UserMessage.flags.read).count()
def sent_time_in_epoch_seconds(user_message):
# user_message is a UserMessage object.
if not user_message:
return None
# We have USE_TZ = True, so our datetime objects are timezone-aware.
# Return the epoch seconds in UTC.
return calendar.timegm(user_message.message.pub_date.utctimetuple())
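# Worked example (illustrative only): a timezone-aware pub_date of
# 2014-01-01 00:00:00 UTC yields calendar.timegm(...) == 1388534400.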
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
def home(request):
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
user_profile = request.user
request._email = request.user.email
request.client = get_client("website")
narrow = []
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
narrow_stream = get_stream(request.GET.get("stream"), user_profile.realm)
assert(narrow_stream is not None)
assert(narrow_stream.is_public())
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.exception("Narrow parsing")
if narrow_topic is not None:
narrow.append(["topic", narrow_topic])
register_ret = do_events_register(user_profile, request.client,
apply_markdown=True, narrow=narrow)
user_has_messages = (register_ret['max_message_id'] != -1)
# Reset our don't-spam-users-with-email counter since the
# user has since logged in
if user_profile.last_reminder is not None:
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
# Brand new users get the tutorial
needs_tutorial = settings.TUTORIAL_ENABLED and \
user_profile.tutorial_status != UserProfile.TUTORIAL_FINISHED
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = first_in_realm and \
not PreregistrationUser.objects.filter(referred_by=user_profile).count()
if user_profile.pointer == -1 and user_has_messages:
# Put the new user's pointer at the bottom
#
# This improves performance, because we limit backfilling of messages
# before the pointer. It's also likely that someone joining an
# organization is interested in recent messages more than the very
# first messages on the system.
register_ret['pointer'] = register_ret['max_message_id']
user_profile.last_pointer_updater = request.session.session_key
if user_profile.pointer == -1:
latest_read = None
else:
try:
latest_read = UserMessage.objects.get(user_profile=user_profile,
message__id=user_profile.pointer)
except UserMessage.DoesNotExist:
# Don't completely fail if your saved pointer ID is invalid
logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
latest_read = None
desktop_notifications_enabled = user_profile.enable_desktop_notifications
if narrow_stream is not None:
desktop_notifications_enabled = False
if user_profile.realm.notifications_stream:
notifications_stream = user_profile.realm.notifications_stream.name
else:
notifications_stream = ""
# Pass parameters to the client-side JavaScript code.
# These end up in a global JavaScript Object named 'page_params'.
page_params = dict(
voyager = settings.VOYAGER,
debug_mode = settings.DEBUG,
test_suite = settings.TEST_SUITE,
poll_timeout = settings.POLL_TIMEOUT,
login_page = settings.HOME_NOT_LOGGED_IN,
password_auth_enabled = password_auth_enabled(user_profile.realm),
have_initial_messages = user_has_messages,
subbed_info = register_ret['subscriptions'],
unsubbed_info = register_ret['unsubscribed'],
email_dict = register_ret['email_dict'],
people_list = register_ret['realm_users'],
bot_list = register_ret['realm_bots'],
initial_pointer = register_ret['pointer'],
initial_presences = register_ret['presences'],
initial_servertime = time.time(), # Used for calculating relative presence age
fullname = user_profile.full_name,
email = user_profile.email,
domain = user_profile.realm.domain,
realm_name = register_ret['realm_name'],
realm_invite_required = register_ret['realm_invite_required'],
realm_invite_by_admins_only = register_ret['realm_invite_by_admins_only'],
realm_restricted_to_domain = register_ret['realm_restricted_to_domain'],
enter_sends = user_profile.enter_sends,
left_side_userlist = register_ret['left_side_userlist'],
referrals = register_ret['referrals'],
realm_emoji = register_ret['realm_emoji'],
needs_tutorial = needs_tutorial,
first_in_realm = first_in_realm,
prompt_for_invites = prompt_for_invites,
notifications_stream = notifications_stream,
# Stream message notification settings:
stream_desktop_notifications_enabled =
user_profile.enable_stream_desktop_notifications,
stream_sounds_enabled = user_profile.enable_stream_sounds,
# Private message and @-mention notification settings:
desktop_notifications_enabled = desktop_notifications_enabled,
sounds_enabled =
user_profile.enable_sounds,
enable_offline_email_notifications =
user_profile.enable_offline_email_notifications,
enable_offline_push_notifications =
user_profile.enable_offline_push_notifications,
twenty_four_hour_time = register_ret['twenty_four_hour_time'],
enable_digest_emails = user_profile.enable_digest_emails,
event_queue_id = register_ret['queue_id'],
last_event_id = register_ret['last_event_id'],
max_message_id = register_ret['max_message_id'],
unread_count = approximate_unread_count(user_profile),
furthest_read_time = sent_time_in_epoch_seconds(latest_read),
staging = settings.ZULIP_COM_STAGING or settings.DEVELOPMENT,
alert_words = register_ret['alert_words'],
muted_topics = register_ret['muted_topics'],
realm_filters = register_ret['realm_filters'],
is_admin = user_profile.is_admin(),
can_create_streams = user_profile.can_create_streams(),
name_changes_disabled = name_changes_disabled(user_profile.realm),
has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
autoscroll_forever = user_profile.autoscroll_forever,
default_desktop_notifications = user_profile.default_desktop_notifications,
avatar_url = avatar_url(user_profile),
mandatory_topics = user_profile.realm.mandatory_topics,
show_digest_email = user_profile.realm.show_digest_email,
)
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
try:
initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
except IndexError:
initial_pointer = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = initial_pointer
page_params["initial_pointer"] = initial_pointer
page_params["have_initial_messages"] = (initial_pointer != -1)
statsd.incr('views.home')
show_invites = True
# Some realms only allow admins to invite users
if user_profile.realm.invite_by_admins_only and not user_profile.is_admin():
show_invites = False
product_name = "Zulip"
page_params['product_name'] = product_name
request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
response = render_to_response('zerver/index.html',
{'user_profile': user_profile,
'page_params' : simplejson.encoder.JSONEncoderForHTML().encode(page_params),
'nofontface': is_buggy_ua(request.META["HTTP_USER_AGENT"]),
'avatar_url': avatar_url(user_profile),
'show_debug':
settings.DEBUG and ('show_debug' in request.GET),
'show_invites': show_invites,
'is_admin': user_profile.is_admin(),
'show_webathena': user_profile.realm.domain == "mit.edu",
'enable_feedback': settings.ENABLE_FEEDBACK,
'embedded': narrow_stream is not None,
'product_name': product_name
},
context_instance=RequestContext(request))
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
def desktop_home(request):
return HttpResponseRedirect(reverse('zerver.views.home'))
def is_buggy_ua(agent):
"""Discrimiate CSS served to clients based on User Agent
Due to QTBUG-3467, @font-face is not supported in QtWebKit.
This may get fixed in the future, but for right now we can
just serve the more conservative CSS to all our desktop apps.
"""
return ("Humbug Desktop/" in agent or "Zulip Desktop/" in agent or "ZulipDesktop/" in agent) and \
not "Mac" in agent
def get_pointer_backend(request, user_profile):
return json_success({'pointer': user_profile.pointer})
@authenticated_json_post_view
def json_update_pointer(request, user_profile):
return update_pointer_backend(request, user_profile)
@has_request_variables
def update_pointer_backend(request, user_profile,
pointer=REQ(converter=to_non_negative_int)):
if pointer <= user_profile.pointer:
return json_success()
try:
UserMessage.objects.get(
user_profile=user_profile,
message__id=pointer
)
except UserMessage.DoesNotExist:
raise JsonableError("Invalid message ID")
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, pointer, update_flags=update_flags)
return json_success()
def generate_client_id():
return generate_random_token(32)
@authenticated_json_post_view
def json_get_profile(request, user_profile):
return get_profile_backend(request, user_profile)
# The order of creation of the various dictionaries is important.
# We filter on {userprofile,stream,subscription_recipient}_ids.
@require_realm_admin
def export(request, user_profile):
if (Message.objects.filter(sender__realm=user_profile.realm).count() > 1000000 or
UserMessage.objects.filter(user_profile__realm=user_profile.realm).count() > 3000000):
return json_error("Realm has too much data for non-batched export.")
response = {}
response['zerver_realm'] = [model_to_dict(x)
for x in Realm.objects.select_related().filter(id=user_profile.realm.id)]
response['zerver_userprofile'] = [model_to_dict(x, exclude=["password", "api_key"])
for x in UserProfile.objects.select_related().filter(realm=user_profile.realm)]
userprofile_ids = set(userprofile["id"] for userprofile in response['zerver_userprofile'])
response['zerver_stream'] = [model_to_dict(x, exclude=["email_token"])
for x in Stream.objects.select_related().filter(realm=user_profile.realm, invite_only=False)]
stream_ids = set(x["id"] for x in response['zerver_stream'])
response['zerver_usermessage'] = [model_to_dict(x) for x in UserMessage.objects.select_related()
if x.user_profile_id in userprofile_ids]
user_recipients = [model_to_dict(x)
for x in Recipient.objects.select_related().filter(type=1)
if x.type_id in userprofile_ids]
stream_recipients = [model_to_dict(x)
for x in Recipient.objects.select_related().filter(type=2)
if x.type_id in stream_ids]
stream_recipient_ids = set(x["id"] for x in stream_recipients)
# only check for subscriptions to streams
response['zerver_subscription'] = [model_to_dict(x) for x in Subscription.objects.select_related()
if x.user_profile_id in userprofile_ids
and x.recipient_id in stream_recipient_ids]
subscription_recipient_ids = set(x["recipient"] for x in response['zerver_subscription'])
huddle_recipients = [model_to_dict(r)
for r in Recipient.objects.select_related().filter(type=3)
if r.type_id in subscription_recipient_ids]
huddle_ids = set(x["type_id"] for x in huddle_recipients)
response["zerver_recipient"] = user_recipients + stream_recipients + huddle_recipients
response['zerver_huddle'] = [model_to_dict(h)
for h in Huddle.objects.select_related()
if h.id in huddle_ids]
recipient_ids = set(x["id"] for x in response['zerver_recipient'])
response["zerver_message"] = [model_to_dict(m) for m in Message.objects.select_related()
if m.recipient_id in recipient_ids
and m.sender_id in userprofile_ids]
for (table, model) in [("defaultstream", DefaultStream),
("realmemoji", RealmEmoji),
("realmalias", RealmAlias),
("realmfilter", RealmFilter)]:
response["zerver_"+table] = [model_to_dict(x) for x in
model.objects.select_related().filter(realm_id=user_profile.realm.id)]
return json_success(response)
def get_profile_backend(request, user_profile):
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1)
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
@authenticated_json_post_view
@has_request_variables
def json_change_enter_sends(request, user_profile,
enter_sends=REQ('enter_sends', validator=check_bool)):
do_change_enter_sends(user_profile, enter_sends)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_tutorial_send_message(request, user_profile, type=REQ,
recipient=REQ, topic=REQ, content=REQ):
"""
This function, used by the onboarding tutorial, causes the Tutorial Bot to
send you the message you pass in here. (That way, the Tutorial Bot's
messages to you get rendered by the server and therefore look like any other
message.)
"""
sender_name = "welcome-bot@zulip.com"
if type == 'stream':
internal_send_message(sender_name, "stream", recipient, topic, content,
realm=user_profile.realm)
return json_success()
# For now, there are no PM cases.
return json_error('Bad data passed in to tutorial_send_message')
@authenticated_json_post_view
@has_request_variables
def json_tutorial_status(request, user_profile, status=REQ('status')):
if status == 'started':
user_profile.tutorial_status = UserProfile.TUTORIAL_STARTED
elif status == 'finished':
user_profile.tutorial_status = UserProfile.TUTORIAL_FINISHED
user_profile.save(update_fields=["tutorial_status"])
return json_success()
@authenticated_json_post_view
def json_get_public_streams(request, user_profile):
return get_public_streams_backend(request, user_profile)
# By default, lists all streams that the user has access to --
# i.e. public streams plus invite-only streams that the user is on
@has_request_variables
def get_streams_backend(request, user_profile,
include_public=REQ(validator=check_bool, default=True),
include_subscribed=REQ(validator=check_bool, default=True),
include_all_active=REQ(validator=check_bool, default=False)):
streams = do_get_streams(user_profile, include_public, include_subscribed,
include_all_active)
return json_success({"streams": streams})
def get_public_streams_backend(request, user_profile):
return get_streams_backend(request, user_profile, include_public=True,
include_subscribed=False, include_all_active=False)
@require_realm_admin
@has_request_variables
def update_realm(request, user_profile, name=REQ(validator=check_string, default=None),
restricted_to_domain=REQ(validator=check_bool, default=None),
invite_required=REQ(validator=check_bool, default=None),
invite_by_admins_only=REQ(validator=check_bool, default=None)):
realm = user_profile.realm
data = {}
if name is not None and realm.name != name:
do_set_realm_name(realm, name)
data['name'] = 'updated'
if restricted_to_domain is not None and realm.restricted_to_domain != restricted_to_domain:
do_set_realm_restricted_to_domain(realm, restricted_to_domain)
data['restricted_to_domain'] = restricted_to_domain
if invite_required is not None and realm.invite_required != invite_required:
do_set_realm_invite_required(realm, invite_required)
data['invite_required'] = invite_required
if invite_by_admins_only is not None and realm.invite_by_admins_only != invite_by_admins_only:
do_set_realm_invite_by_admins_only(realm, invite_by_admins_only)
data['invite_by_admins_only'] = invite_by_admins_only
return json_success(data)
@require_realm_admin
@has_request_variables
def add_default_stream(request, user_profile, stream_name=REQ):
return json_success(do_add_default_stream(user_profile.realm, stream_name))
@require_realm_admin
@has_request_variables
def remove_default_stream(request, user_profile, stream_name=REQ):
return json_success(do_remove_default_stream(user_profile.realm, stream_name))
@authenticated_json_post_view
@require_realm_admin
@has_request_variables
def json_rename_stream(request, user_profile, old_name=REQ, new_name=REQ):
return json_success(do_rename_stream(user_profile.realm, old_name, new_name))
@authenticated_json_post_view
@require_realm_admin
@has_request_variables
def json_make_stream_public(request, user_profile, stream_name=REQ):
return json_success(do_make_stream_public(user_profile, user_profile.realm, stream_name))
@authenticated_json_post_view
@require_realm_admin
@has_request_variables
def json_make_stream_private(request, user_profile, stream_name=REQ):
return json_success(do_make_stream_private(user_profile.realm, stream_name))
@require_realm_admin
@has_request_variables
def update_stream_backend(request, user_profile, stream_name,
description=REQ(validator=check_string, default=None)):
if description is not None:
do_change_stream_description(user_profile.realm, stream_name, description)
return json_success({})
def list_subscriptions_backend(request, user_profile):
return json_success({"subscriptions": gather_subscriptions(user_profile)[0]})
@transaction.atomic
@has_request_variables
def update_subscriptions_backend(request, user_profile,
delete=REQ(validator=check_list(check_string), default=[]),
add=REQ(validator=check_list(check_dict([['name', check_string]])), default=[])):
if not add and not delete:
return json_error('Nothing to do. Specify at least one of "add" or "delete".')
json_dict = {}
for method, items in ((add_subscriptions_backend, add), (remove_subscriptions_backend, delete)):
response = method(request, user_profile, streams_raw=items)
if response.status_code != 200:
transaction.rollback()
return response
json_dict.update(ujson.loads(response.content))
return json_success(json_dict)
@authenticated_json_post_view
def json_remove_subscriptions(request, user_profile):
return remove_subscriptions_backend(request, user_profile)
@has_request_variables
def remove_subscriptions_backend(request, user_profile,
streams_raw = REQ("subscriptions", validator=check_list(check_string)),
principals = REQ(validator=check_list(check_string), default=None)):
removing_someone_else = principals and \
set(principals) != set((user_profile.email,))
if removing_someone_else and not user_profile.is_admin():
# You can only unsubscribe other people from a stream if you are a realm
# admin.
return json_error("This action requires administrative rights")
streams, _ = list_to_streams(streams_raw, user_profile)
for stream in streams:
if removing_someone_else and stream.invite_only and \
not subscribed_to_stream(user_profile, stream):
# Even as an admin, you can't remove other people from an
# invite-only stream you're not on.
return json_error("Cannot administer invite-only streams this way")
if principals:
people_to_unsub = set(principal_to_user_profile(
user_profile, principal) for principal in principals)
else:
people_to_unsub = [user_profile]
result = dict(removed=[], not_subscribed=[])
(removed, not_subscribed) = bulk_remove_subscriptions(people_to_unsub, streams)
for (subscriber, stream) in removed:
result["removed"].append(stream.name)
for (subscriber, stream) in not_subscribed:
result["not_subscribed"].append(stream.name)
return json_success(result)
@authenticated_json_post_view
def json_add_subscriptions(request, user_profile):
return add_subscriptions_backend(request, user_profile)
def filter_stream_authorization(user_profile, streams):
streams_subscribed = set()
recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams])
subs = Subscription.objects.filter(user_profile=user_profile,
recipient__in=recipients_map.values(),
active=True)
for sub in subs:
streams_subscribed.add(sub.recipient.type_id)
unauthorized_streams = []
for stream in streams:
# The user is authorized for their own streams
if stream.id in streams_subscribed:
continue
# The user is not authorized for invite_only streams
if stream.invite_only:
unauthorized_streams.append(stream)
streams = [stream for stream in streams if
stream.id not in set(stream.id for stream in unauthorized_streams)]
return streams, unauthorized_streams
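# Illustrative outcome (hypothetical streams): for a user subscribed only to
# "devel", filter_stream_authorization(user, [devel, private_stream]) returns
# ([devel], [private_stream]) when private_stream is invite-only and the user
# is not subscribed to it.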
def stream_link(stream_name):
"Escapes a stream name to make a #narrow/stream/stream_name link"
return "#narrow/stream/%s" % (urllib.quote(stream_name.encode('utf-8')),)
def stream_button(stream_name):
stream_name = stream_name.replace('\\', '\\\\')
stream_name = stream_name.replace(')', '\\)')
return '!_stream_subscribe_button(%s)' % (stream_name,)
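# Illustrative outputs of the two helpers above (example names are hypothetical):
#   stream_link("social") -> "#narrow/stream/social"
#   stream_button("a)b")  -> "!_stream_subscribe_button(a\)b)"   (")" is escaped)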
@has_request_variables
def add_subscriptions_backend(request, user_profile,
streams_raw = REQ("subscriptions",
validator=check_list(check_dict([['name', check_string]]))),
invite_only = REQ(validator=check_bool, default=False),
announce = REQ(validator=check_bool, default=False),
principals = REQ(validator=check_list(check_string), default=None),
authorization_errors_fatal = REQ(validator=check_bool, default=True)):
if not user_profile.can_create_streams():
return json_error('User cannot create streams.')
stream_names = []
for stream in streams_raw:
stream_name = stream["name"].strip()
if len(stream_name) > Stream.MAX_NAME_LENGTH:
return json_error("Stream name (%s) too long." % (stream_name,))
if not valid_stream_name(stream_name):
return json_error("Invalid stream name (%s)." % (stream_name,))
stream_names.append(stream_name)
existing_streams, created_streams = \
list_to_streams(stream_names, user_profile, autocreate=True, invite_only=invite_only)
authorized_streams, unauthorized_streams = \
filter_stream_authorization(user_profile, existing_streams)
if len(unauthorized_streams) > 0 and authorization_errors_fatal:
return json_error("Unable to access stream (%s)." % unauthorized_streams[0].name)
# Newly created streams are also authorized for the creator
streams = authorized_streams + created_streams
if principals is not None:
if user_profile.realm.domain == 'mit.edu' and not all(stream.invite_only for stream in streams):
return json_error("You can only invite other mit.edu users to invite-only streams.")
subscribers = set(principal_to_user_profile(user_profile, principal) for principal in principals)
else:
subscribers = [user_profile]
(subscribed, already_subscribed) = bulk_add_subscriptions(streams, subscribers)
result = dict(subscribed=defaultdict(list), already_subscribed=defaultdict(list))
for (subscriber, stream) in subscribed:
result["subscribed"][subscriber.email].append(stream.name)
for (subscriber, stream) in already_subscribed:
result["already_subscribed"][subscriber.email].append(stream.name)
private_streams = dict((stream.name, stream.invite_only) for stream in streams)
bots = dict((subscriber.email, subscriber.is_bot) for subscriber in subscribers)
# Inform the user if someone else subscribed them to stuff,
# or if a new stream was created with the "announce" option.
notifications = []
if principals and result["subscribed"]:
for email, subscriptions in result["subscribed"].iteritems():
if email == user_profile.email:
# Don't send a Zulip if you invited yourself.
continue
if bots[email]:
# Don't send invitation Zulips to bots
continue
if len(subscriptions) == 1:
msg = ("Hi there! We thought you'd like to know that %s just "
"subscribed you to the%s stream [%s](%s)."
% (user_profile.full_name,
" **invite-only**" if private_streams[subscriptions[0]] else "",
subscriptions[0],
stream_link(subscriptions[0]),
))
else:
msg = ("Hi there! We thought you'd like to know that %s just "
"subscribed you to the following streams: \n\n"
% (user_profile.full_name,))
for stream in subscriptions:
msg += "* [%s](%s)%s\n" % (
stream,
stream_link(stream),
" (**invite-only**)" if private_streams[stream] else "")
if len([s for s in subscriptions if not private_streams[s]]) > 0:
msg += "\nYou can see historical content on a non-invite-only stream by narrowing to it."
notifications.append(internal_prep_message(settings.NOTIFICATION_BOT,
"private", email, "", msg))
if announce and len(created_streams) > 0:
notifications_stream = user_profile.realm.notifications_stream
if notifications_stream is not None:
if len(created_streams) > 1:
stream_msg = "the following streams: %s" % \
(", ".join('`%s`' % (s.name,) for s in created_streams),)
else:
stream_msg = "a new stream `%s`" % (created_streams[0].name)
stream_buttons = ' '.join(stream_button(s.name) for s in created_streams)
msg = ("%s just created %s. %s" % (user_profile.full_name,
stream_msg, stream_buttons))
notifications.append(internal_prep_message(settings.NOTIFICATION_BOT,
"stream",
notifications_stream.name, "Streams", msg,
realm=notifications_stream.realm))
else:
msg = ("Hi there! %s just created a new stream '%s'. %s"
% (user_profile.full_name, created_streams[0].name, stream_button(created_streams[0].name)))
for realm_user_dict in get_active_user_dicts_in_realm(user_profile.realm):
# Don't announce to yourself or to people you explicitly added
# (who will get the notification above instead).
if realm_user_dict['email'] in principals or realm_user_dict['email'] == user_profile.email:
continue
notifications.append(internal_prep_message(settings.NOTIFICATION_BOT,
"private",
realm_user_dict['email'], "", msg))
if len(notifications) > 0:
do_send_messages(notifications)
result["subscribed"] = dict(result["subscribed"])
result["already_subscribed"] = dict(result["already_subscribed"])
if not authorization_errors_fatal:
result["unauthorized"] = [stream.name for stream in unauthorized_streams]
return json_success(result)
def get_members_backend(request, user_profile):
realm = user_profile.realm
admins = set(user_profile.realm.get_admin_users())
members = []
for profile in UserProfile.objects.select_related().filter(realm=realm):
avatar_url = get_avatar_url(
profile.avatar_source,
profile.email
)
member = {"full_name": profile.full_name,
"is_bot": profile.is_bot,
"is_active": profile.is_active,
"is_admin": (profile in admins),
"email": profile.email,
"avatar_url": avatar_url,}
if profile.is_bot and profile.bot_owner is not None:
member["bot_owner"] = profile.bot_owner.email
members.append(member)
return json_success({'members': members})
@authenticated_json_post_view
def json_get_subscribers(request, user_profile):
return get_subscribers_backend(request, user_profile)
@authenticated_json_post_view
@has_request_variables
def json_upload_file(request, user_profile):
if len(request.FILES) == 0:
return json_error("You must specify a file to upload")
if len(request.FILES) != 1:
return json_error("You may only upload one file at a time")
user_file = request.FILES.values()[0]
uri = upload_message_image_through_web_client(request, user_file, user_profile)
return json_success({'uri': uri})
@login_required(login_url = settings.HOME_NOT_LOGGED_IN)
@has_request_variables
def get_uploaded_file(request, realm_id, filename,
redir=REQ(validator=check_bool, default=True)):
if settings.LOCAL_UPLOADS_DIR is not None:
return HttpResponseForbidden() # Should have been served by nginx
user_profile = request.user
url_path = "%s/%s" % (realm_id, filename)
if realm_id == "unk":
realm_id = get_realm_for_filename(url_path)
if realm_id is None:
# File does not exist
return json_error("That file does not exist.", status=404)
# Internal users can access all uploads so we can receive attachments in cross-realm messages
if user_profile.realm.id == int(realm_id) or user_profile.realm.domain == 'zulip.com':
uri = get_signed_upload_url(url_path)
if redir:
return redirect(uri)
else:
return json_success({'uri': uri})
else:
return HttpResponseForbidden()
@has_request_variables
def get_subscribers_backend(request, user_profile, stream_name=REQ('stream')):
stream = get_stream(stream_name, user_profile.realm)
if stream is None:
raise JsonableError("Stream does not exist: %s" % (stream_name,))
subscribers = get_subscriber_emails(stream, user_profile)
return json_success({'subscribers': subscribers})
@authenticated_json_post_view
@has_request_variables
def json_change_settings(request, user_profile,
full_name=REQ,
old_password=REQ(default=""),
new_password=REQ(default=""),
confirm_password=REQ(default="")):
if new_password != "" or confirm_password != "":
if new_password != confirm_password:
return json_error("New password must match confirmation password!")
if not authenticate(username=user_profile.email, password=old_password):
return json_error("Wrong password!")
do_change_password(user_profile, new_password)
result = {}
if user_profile.full_name != full_name and full_name.strip() != "":
if name_changes_disabled(user_profile.realm):
# Failing silently is fine -- they can't do it through the UI, so
# they'd have to be trying to break the rules.
pass
else:
new_full_name = full_name.strip()
if len(new_full_name) > UserProfile.MAX_NAME_LENGTH:
return json_error("Name too long!")
do_change_full_name(user_profile, new_full_name)
result['full_name'] = new_full_name
return json_success(result)
@authenticated_json_post_view
@has_request_variables
def json_time_setting(request, user_profile, twenty_four_hour_time=REQ(validator=check_bool, default=None)):
result = {}
if twenty_four_hour_time is not None and \
user_profile.twenty_four_hour_time != twenty_four_hour_time:
do_change_twenty_four_hour_time(user_profile, twenty_four_hour_time)
result['twenty_four_hour_time'] = twenty_four_hour_time
return json_success(result)
@authenticated_json_post_view
@has_request_variables
def json_left_side_userlist(request, user_profile, left_side_userlist=REQ(validator=check_bool, default=None)):
result = {}
if left_side_userlist is not None and \
user_profile.left_side_userlist != left_side_userlist:
do_change_left_side_userlist(user_profile, left_side_userlist)
result['left_side_userlist'] = left_side_userlist
return json_success(result)
@authenticated_json_post_view
@has_request_variables
def json_change_notify_settings(request, user_profile,
enable_stream_desktop_notifications=REQ(validator=check_bool,
default=None),
enable_stream_sounds=REQ(validator=check_bool,
default=None),
enable_desktop_notifications=REQ(validator=check_bool,
default=None),
enable_sounds=REQ(validator=check_bool,
default=None),
enable_offline_email_notifications=REQ(validator=check_bool,
default=None),
enable_offline_push_notifications=REQ(validator=check_bool,
default=None),
enable_digest_emails=REQ(validator=check_bool,
default=None)):
result = {}
# Stream notification settings.
if enable_stream_desktop_notifications is not None and \
user_profile.enable_stream_desktop_notifications != enable_stream_desktop_notifications:
do_change_enable_stream_desktop_notifications(
user_profile, enable_stream_desktop_notifications)
result['enable_stream_desktop_notifications'] = enable_stream_desktop_notifications
if enable_stream_sounds is not None and \
user_profile.enable_stream_sounds != enable_stream_sounds:
do_change_enable_stream_sounds(user_profile, enable_stream_sounds)
result['enable_stream_sounds'] = enable_stream_sounds
# PM and @-mention settings.
if enable_desktop_notifications is not None and \
user_profile.enable_desktop_notifications != enable_desktop_notifications:
do_change_enable_desktop_notifications(user_profile, enable_desktop_notifications)
result['enable_desktop_notifications'] = enable_desktop_notifications
if enable_sounds is not None and \
user_profile.enable_sounds != enable_sounds:
do_change_enable_sounds(user_profile, enable_sounds)
result['enable_sounds'] = enable_sounds
if enable_offline_email_notifications is not None and \
user_profile.enable_offline_email_notifications != enable_offline_email_notifications:
do_change_enable_offline_email_notifications(user_profile, enable_offline_email_notifications)
result['enable_offline_email_notifications'] = enable_offline_email_notifications
if enable_offline_push_notifications is not None and \
user_profile.enable_offline_push_notifications != enable_offline_push_notifications:
do_change_enable_offline_push_notifications(user_profile, enable_offline_push_notifications)
result['enable_offline_push_notifications'] = enable_offline_push_notifications
if enable_digest_emails is not None and \
user_profile.enable_digest_emails != enable_digest_emails:
do_change_enable_digest_emails(user_profile, enable_digest_emails)
result['enable_digest_emails'] = enable_digest_emails
return json_success(result)
@require_realm_admin
@has_request_variables
def create_user_backend(request, user_profile, email=REQ, password=REQ,
full_name=REQ, short_name=REQ):
form = CreateUserForm({'full_name': full_name, 'email': email})
if not form.is_valid():
return json_error('Bad name or username')
# Check that the new user's email address belongs to the admin's realm
realm = user_profile.realm
domain = resolve_email_to_domain(email)
if realm.domain != domain:
return json_error("Email '%s' does not belong to domain '%s'" % (email, realm.domain))
try:
get_user_profile_by_email(email)
return json_error("Email '%s' already in use" % (email,))
except UserProfile.DoesNotExist:
pass
new_user_profile = do_create_user(email, password, realm, full_name, short_name)
process_new_human_user(new_user_profile)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_change_ui_settings(request, user_profile,
autoscroll_forever=REQ(validator=check_bool,
default=None),
default_desktop_notifications=REQ(validator=check_bool,
default=None)):
result = {}
if autoscroll_forever is not None and \
user_profile.autoscroll_forever != autoscroll_forever:
do_change_autoscroll_forever(user_profile, autoscroll_forever)
result['autoscroll_forever'] = autoscroll_forever
if default_desktop_notifications is not None and \
user_profile.default_desktop_notifications != default_desktop_notifications:
do_change_default_desktop_notifications(user_profile, default_desktop_notifications)
result['default_desktop_notifications'] = default_desktop_notifications
return json_success(result)
@authenticated_json_post_view
@has_request_variables
def json_stream_exists(request, user_profile, stream=REQ,
autosubscribe=REQ(default=False)):
return stream_exists_backend(request, user_profile, stream, autosubscribe)
def stream_exists_backend(request, user_profile, stream_name, autosubscribe):
if not valid_stream_name(stream_name):
return json_error("Invalid characters in stream name")
stream = get_stream(stream_name, user_profile.realm)
result = {"exists": bool(stream)}
if stream is not None:
recipient = get_recipient(Recipient.STREAM, stream.id)
if autosubscribe:
bulk_add_subscriptions([stream], [user_profile])
result["subscribed"] = Subscription.objects.filter(user_profile=user_profile,
recipient=recipient,
active=True).exists()
return json_success(result) # results are ignored for HEAD requests
return json_response(data=result, status=404)
def get_subscription_or_die(stream_name, user_profile):
stream = get_stream(stream_name, user_profile.realm)
if not stream:
raise JsonableError("Invalid stream %s" % (stream.name,))
recipient = get_recipient(Recipient.STREAM, stream.id)
subscription = Subscription.objects.filter(user_profile=user_profile,
recipient=recipient, active=True)
if not subscription.exists():
raise JsonableError("Not subscribed to stream %s" % (stream_name,))
return subscription
@authenticated_json_view
@has_request_variables
def json_subscription_property(request, user_profile, subscription_data=REQ(
validator=check_list(
check_dict([["stream", check_string],
["property", check_string],
["value", check_variable_type(
[check_string, check_bool])]])))):
"""
This is the entry point to changing subscription properties. This
is a bulk endpoint: requestors always provide a subscription_data
list containing dictionaries for each stream of interest.
Requests are of the form:
[{"stream": "devel", "property": "in_home_view", "value": False},
{"stream": "devel", "property": "color", "value": "#c2c2c2"}]
"""
if request.method != "POST":
return json_error("Invalid verb")
property_converters = {"color": check_string, "in_home_view": check_bool,
"desktop_notifications": check_bool,
"audible_notifications": check_bool}
response_data = []
for change in subscription_data:
stream_name = change["stream"]
property = change["property"]
value = change["value"]
if property not in property_converters:
return json_error("Unknown subscription property: %s" % (property,))
sub = get_subscription_or_die(stream_name, user_profile)[0]
property_conversion = property_converters[property](property, value)
if property_conversion:
return json_error(property_conversion)
do_change_subscription_property(user_profile, sub, stream_name,
property, value)
response_data.append({'stream': stream_name,
'property': property,
'value': value})
return json_success({"subscription_data": response_data})
@csrf_exempt
@require_post
@has_request_variables
def api_fetch_api_key(request, username=REQ, password=REQ):
return_data = {}
if username == "google-oauth2-token":
user_profile = authenticate(google_oauth2_token=password, return_data=return_data)
else:
user_profile = authenticate(username=username, password=password)
if user_profile is None:
if return_data.get("valid_attestation") is True:
# We can leak that the user is unregistered iff they present a valid authentication string for the user.
return json_error("This user is not registered; do so from a browser.", data={"reason": "unregistered"}, status=403)
return json_error("Your username or password is incorrect.", data={"reason": "incorrect_creds"}, status=403)
if not user_profile.is_active:
return json_error("Your account has been disabled.", data={"reason": "disabled"}, status=403)
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@authenticated_json_post_view
@has_request_variables
def json_fetch_api_key(request, user_profile, password=REQ(default='')):
if password_auth_enabled(user_profile.realm) and not user_profile.check_password(password):
return json_error("Your username or password is incorrect.")
return json_success({"api_key": user_profile.api_key})
def get_status_list(requesting_user_profile):
return {'presences': get_status_dict(requesting_user_profile),
'server_timestamp': time.time()}
@has_request_variables
def update_active_status_backend(request, user_profile, status=REQ,
new_user_input=REQ(validator=check_bool, default=False)):
status_val = UserPresence.status_from_string(status)
if status_val is None:
raise JsonableError("Invalid presence status: %s" % (status,))
else:
update_user_presence(user_profile, request.client, now(), status_val,
new_user_input)
ret = get_status_list(user_profile)
if user_profile.realm.domain == "mit.edu":
try:
activity = UserActivity.objects.get(user_profile = user_profile,
query="get_events_backend",
client__name="zephyr_mirror")
ret['zephyr_mirror_active'] = \
(activity.last_visit.replace(tzinfo=None) >
datetime.datetime.utcnow() - datetime.timedelta(minutes=5))
except UserActivity.DoesNotExist:
ret['zephyr_mirror_active'] = False
return json_success(ret)
@authenticated_json_post_view
def json_update_active_status(request, user_profile):
return update_active_status_backend(request, user_profile)
@authenticated_json_post_view
def json_get_active_statuses(request, user_profile):
return json_success(get_status_list(user_profile))
# Read the source map information for decoding JavaScript backtraces
js_source_map = None
if not (settings.DEBUG or settings.TEST_SUITE):
js_source_map = SourceMap(os.path.join(
settings.DEPLOY_ROOT, 'prod-static/source-map'))
@authenticated_json_post_view
@has_request_variables
def json_report_send_time(request, user_profile,
time=REQ(converter=to_non_negative_int),
received=REQ(converter=to_non_negative_int, default="(unknown)"),
displayed=REQ(converter=to_non_negative_int, default="(unknown)"),
locally_echoed=REQ(validator=check_bool, default=False),
rendered_content_disparity=REQ(validator=check_bool, default=False)):
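# Note: "(unknown)" is a string sentinel meaning the client did not report
# that timing; the checks below only emit statsd timings for reported values.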
request._log_data["extra"] = "[%sms/%sms/%sms/echo:%s/diff:%s]" \
% (time, received, displayed, locally_echoed, rendered_content_disparity)
statsd.timing("endtoend.send_time.%s" % (statsd_key(user_profile.realm.domain, clean_periods=True),), time)
if received != "(unknown)":
statsd.timing("endtoend.receive_time.%s" % (statsd_key(user_profile.realm.domain, clean_periods=True),), received)
if displayed != "(unknown)":
statsd.timing("endtoend.displayed_time.%s" % (statsd_key(user_profile.realm.domain, clean_periods=True),), displayed)
if locally_echoed:
statsd.incr('locally_echoed')
if rendered_content_disparity:
statsd.incr('render_disparity')
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_report_narrow_time(request, user_profile,
initial_core=REQ(converter=to_non_negative_int),
initial_free=REQ(converter=to_non_negative_int),
network=REQ(converter=to_non_negative_int)):
request._log_data["extra"] = "[%sms/%sms/%sms]" % (initial_core, initial_free, network)
statsd.timing("narrow.initial_core.%s" % (statsd_key(user_profile.realm.domain, clean_periods=True),), initial_core)
statsd.timing("narrow.initial_free.%s" % (statsd_key(user_profile.realm.domain, clean_periods=True),), initial_free)
statsd.timing("narrow.network.%s" % (statsd_key(user_profile.realm.domain, clean_periods=True),), network)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_report_unnarrow_time(request, user_profile,
initial_core=REQ(converter=to_non_negative_int),
initial_free=REQ(converter=to_non_negative_int)):
request._log_data["extra"] = "[%sms/%sms]" % (initial_core, initial_free)
statsd.timing("unnarrow.initial_core.%s" % (statsd_key(user_profile.realm.domain, clean_periods=True),), initial_core)
statsd.timing("unnarrow.initial_free.%s" % (statsd_key(user_profile.realm.domain, clean_periods=True),), initial_free)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_report_error(request, user_profile, message=REQ, stacktrace=REQ,
ui_message=REQ(validator=check_bool), user_agent=REQ,
href=REQ, log=REQ,
more_info=REQ(validator=check_dict([]), default=None)):
if not settings.ERROR_REPORTING:
return json_success()
if js_source_map:
stacktrace = js_source_map.annotate_stacktrace(stacktrace)
try:
version = subprocess.check_output(["git", "log", "HEAD^..HEAD", "--oneline"])
except Exception:
version = None
queue_json_publish('error_reports', dict(
type = "browser",
report = dict(
user_email = user_profile.email,
user_full_name = user_profile.full_name,
user_visible = ui_message,
server_path = settings.DEPLOY_ROOT,
version = version,
user_agent = user_agent,
href = href,
message = message,
stacktrace = stacktrace,
log = log,
more_info = more_info,
)
), lambda x: None)
return json_success()
@authenticated_json_post_view
def json_events_register(request, user_profile):
return events_register_backend(request, user_profile)
# Does not need to be authenticated because it's called from rest_dispatch
@has_request_variables
def api_events_register(request, user_profile,
apply_markdown=REQ(default=False, validator=check_bool),
all_public_streams=REQ(default=None, validator=check_bool)):
return events_register_backend(request, user_profile,
apply_markdown=apply_markdown,
all_public_streams=all_public_streams)
def _default_all_public_streams(user_profile, all_public_streams):
if all_public_streams is not None:
return all_public_streams
else:
return user_profile.default_all_public_streams
def _default_narrow(user_profile, narrow):
default_stream = user_profile.default_events_register_stream
if not narrow and default_stream is not None:
narrow = [('stream', default_stream.name)]
return narrow
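# Illustrative default (hypothetical stream name): with no narrow supplied and
# default_events_register_stream set to "errors", _default_narrow returns
# [('stream', 'errors')].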
@has_request_variables
def events_register_backend(request, user_profile, apply_markdown=True,
all_public_streams=None,
event_types=REQ(validator=check_list(check_string), default=None),
narrow=REQ(validator=check_list(check_list(check_string, length=2)), default=[]),
queue_lifespan_secs=REQ(converter=int, default=0)):
all_public_streams = _default_all_public_streams(user_profile, all_public_streams)
narrow = _default_narrow(user_profile, narrow)
ret = do_events_register(user_profile, request.client, apply_markdown,
event_types, queue_lifespan_secs, all_public_streams,
narrow=narrow)
return json_success(ret)
def deactivate_user_backend(request, user_profile, email):
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error('No such user')
if target.is_bot:
return json_error('No such user')
return _deactivate_user_profile_backend(request, user_profile, target)
def deactivate_bot_backend(request, user_profile, email):
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error('No such bot')
if not target.is_bot:
return json_error('No such bot')
return _deactivate_user_profile_backend(request, user_profile, target)
def _deactivate_user_profile_backend(request, user_profile, target):
if not user_profile.can_admin_user(target):
return json_error('Insufficient permission')
do_deactivate_user(target)
return json_success({})
def reactivate_user_backend(request, user_profile, email):
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error('No such user')
if not user_profile.can_admin_user(target):
return json_error('Insufficient permission')
do_reactivate_user(target)
return json_success({})
@has_request_variables
def update_user_backend(request, user_profile, email,
is_admin=REQ(default=None, validator=check_bool)):
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error('No such user')
if not user_profile.can_admin_user(target):
return json_error('Insufficient permission')
if is_admin is not None:
do_change_is_admin(target, is_admin)
return json_success({})
@require_realm_admin
def deactivate_stream_backend(request, user_profile, stream_name):
target = get_stream(stream_name, user_profile.realm)
if not target:
return json_error('No such stream name')
if target.invite_only and not subscribed_to_stream(user_profile, target):
return json_error('Cannot administer invite-only streams this way')
do_deactivate_stream(target)
return json_success({})
def avatar(request, email):
try:
user_profile = get_user_profile_by_email(email)
avatar_source = user_profile.avatar_source
except UserProfile.DoesNotExist:
avatar_source = 'G'
url = get_avatar_url(avatar_source, email)
if '?' in url:
sep = '&'
else:
sep = '?'
url += sep + request.META['QUERY_STRING']
return redirect(url)
def get_stream_name(stream):
if stream:
name = stream.name
else:
name = None
return name
def stream_or_none(stream_name, realm):
if stream_name == '':
return None
else:
stream = get_stream(stream_name, realm)
if not stream:
raise JsonableError('No such stream \'%s\'' % (stream_name, ))
return stream
@has_request_variables
def patch_bot_backend(request, user_profile, email,
full_name=REQ(default=None),
default_sending_stream=REQ(default=None),
default_events_register_stream=REQ(default=None),
default_all_public_streams=REQ(default=None, validator=check_bool)):
try:
bot = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error('No such user')
if not user_profile.can_admin_user(bot):
return json_error('Insufficient permission')
if full_name is not None:
do_change_full_name(bot, full_name)
if default_sending_stream is not None:
stream = stream_or_none(default_sending_stream, bot.realm)
do_change_default_sending_stream(bot, stream)
if default_events_register_stream is not None:
stream = stream_or_none(default_events_register_stream, bot.realm)
do_change_default_events_register_stream(bot, stream)
if default_all_public_streams is not None:
do_change_default_all_public_streams(bot, default_all_public_streams)
if len(request.FILES) == 0:
pass
elif len(request.FILES) == 1:
user_file = request.FILES.values()[0]
upload_avatar_image(user_file, user_profile, bot.email)
avatar_source = UserProfile.AVATAR_FROM_USER
do_change_avatar_source(bot, avatar_source)
else:
return json_error("You may only upload one file at a time")
json_result = dict(
full_name=bot.full_name,
avatar_url=avatar_url(bot),
default_sending_stream=get_stream_name(bot.default_sending_stream),
default_events_register_stream=get_stream_name(bot.default_events_register_stream),
default_all_public_streams=bot.default_all_public_streams,
)
return json_success(json_result)
@authenticated_json_post_view
def json_set_avatar(request, user_profile):
if len(request.FILES) != 1:
return json_error("You must upload exactly one avatar.")
user_file = request.FILES.values()[0]
upload_avatar_image(user_file, user_profile, user_profile.email)
do_change_avatar_source(user_profile, UserProfile.AVATAR_FROM_USER)
user_avatar_url = avatar_url(user_profile)
json_result = dict(
avatar_url = user_avatar_url
)
return json_success(json_result)
@has_request_variables
def regenerate_api_key(request, user_profile):
do_regenerate_api_key(user_profile)
json_result = dict(
api_key = user_profile.api_key
)
return json_success(json_result)
@has_request_variables
def regenerate_bot_api_key(request, user_profile, email):
try:
bot = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error('No such user')
if not user_profile.can_admin_user(bot):
return json_error('Insufficient permission')
do_regenerate_api_key(bot)
json_result = dict(
api_key = bot.api_key
)
return json_success(json_result)
@has_request_variables
def add_bot_backend(request, user_profile, full_name=REQ, short_name=REQ,
default_sending_stream=REQ(default=None),
default_events_register_stream=REQ(default=None),
default_all_public_streams=REQ(validator=check_bool, default=None)):
short_name += "-bot"
email = short_name + "@" + user_profile.realm.domain
form = CreateUserForm({'full_name': full_name, 'email': email})
if not form.is_valid():
# We validate client-side as well
return json_error('Bad name or username')
try:
get_user_profile_by_email(email)
return json_error("Username already in use")
except UserProfile.DoesNotExist:
pass
if len(request.FILES) == 0:
avatar_source = UserProfile.AVATAR_FROM_GRAVATAR
elif len(request.FILES) != 1:
return json_error("You may only upload one file at a time")
else:
user_file = request.FILES.values()[0]
upload_avatar_image(user_file, user_profile, email)
avatar_source = UserProfile.AVATAR_FROM_USER
if default_sending_stream is not None:
default_sending_stream = stream_or_none(default_sending_stream, user_profile.realm)
if default_sending_stream and not default_sending_stream.is_public() and not \
subscribed_to_stream(user_profile, default_sending_stream):
return json_error('Insufficient permission')
if default_events_register_stream is not None:
default_events_register_stream = stream_or_none(default_events_register_stream,
user_profile.realm)
if default_events_register_stream and not default_events_register_stream.is_public() and not \
subscribed_to_stream(user_profile, default_events_register_stream):
return json_error('Insufficient permission')
bot_profile = do_create_user(email=email, password='',
realm=user_profile.realm, full_name=full_name,
short_name=short_name, active=True, bot=True,
bot_owner=user_profile,
avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams)
json_result = dict(
api_key=bot_profile.api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=get_stream_name(bot_profile.default_sending_stream),
default_events_register_stream=get_stream_name(bot_profile.default_events_register_stream),
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success(json_result)
def get_bots_backend(request, user_profile):
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
bot_profiles = bot_profiles.select_related('default_sending_stream', 'default_events_register_stream')
bot_profiles = bot_profiles.order_by('date_joined')
def bot_info(bot_profile):
default_sending_stream = get_stream_name(bot_profile.default_sending_stream)
default_events_register_stream = get_stream_name(bot_profile.default_events_register_stream)
return dict(
username=bot_profile.email,
full_name=bot_profile.full_name,
api_key=bot_profile.api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success({'bots': map(bot_info, bot_profiles)})
@authenticated_json_post_view
@has_request_variables
def json_refer_friend(request, user_profile, email=REQ):
if not email:
return json_error("No email address specified")
if user_profile.invites_granted - user_profile.invites_used <= 0:
return json_error("Insufficient invites")
do_refer_friend(user_profile, email)
return json_success()
def list_alert_words(request, user_profile):
return json_success({'alert_words': user_alert_words(user_profile)})
@authenticated_json_post_view
@has_request_variables
def json_set_alert_words(request, user_profile,
alert_words=REQ(validator=check_list(check_string), default=[])):
do_set_alert_words(user_profile, alert_words)
return json_success()
@has_request_variables
def set_alert_words(request, user_profile,
alert_words=REQ(validator=check_list(check_string), default=[])):
do_set_alert_words(user_profile, alert_words)
return json_success()
@has_request_variables
def add_alert_words(request, user_profile,
alert_words=REQ(validator=check_list(check_string), default=[])):
do_add_alert_words(user_profile, alert_words)
return json_success()
@has_request_variables
def remove_alert_words(request, user_profile,
alert_words=REQ(validator=check_list(check_string), default=[])):
do_remove_alert_words(user_profile, alert_words)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_set_muted_topics(request, user_profile,
muted_topics=REQ(validator=check_list(check_list(check_string, length=2)), default=[])):
do_set_muted_topics(user_profile, muted_topics)
return json_success()
def add_push_device_token(request, user_profile, token, kind, ios_app_id=None):
if token == '' or len(token) > 4096:
return json_error('Empty or invalid length token')
# If another user was previously logged in on the same device and didn't
# properly log out, the token will still be registered to the wrong account
PushDeviceToken.objects.filter(token=token).delete()
# Overwrite with the latest value
token, created = PushDeviceToken.objects.get_or_create(user=user_profile,
token=token,
kind=kind,
ios_app_id=ios_app_id)
if not created:
token.last_updated = now()
token.save(update_fields=['last_updated'])
return json_success()
@has_request_variables
def add_apns_device_token(request, user_profile, token=REQ, appid=REQ(default=settings.ZULIP_IOS_APP_ID)):
return add_push_device_token(request, user_profile, token, PushDeviceToken.APNS, ios_app_id=appid)
@has_request_variables
def add_android_reg_id(request, user_profile, token=REQ):
return add_push_device_token(request, user_profile, token, PushDeviceToken.GCM)
def remove_push_device_token(request, user_profile, token, kind):
if token == '' or len(token) > 4096:
return json_error('Empty or invalid length token')
try:
token = PushDeviceToken.objects.get(token=token, kind=kind)
token.delete()
except PushDeviceToken.DoesNotExist:
return json_error("Token does not exist")
return json_success()
@has_request_variables
def remove_apns_device_token(request, user_profile, token=REQ):
return remove_push_device_token(request, user_profile, token, PushDeviceToken.APNS)
@has_request_variables
def remove_android_reg_id(request, user_profile, token=REQ):
return remove_push_device_token(request, user_profile, token, PushDeviceToken.GCM)
def generate_204(request):
return HttpResponse(content=None, status=204)
def process_unsubscribe(token, type, unsubscribe_function):
try:
confirmation = Confirmation.objects.get(confirmation_key=token)
except Confirmation.DoesNotExist:
return render_to_response('zerver/unsubscribe_link_error.html')
user_profile = confirmation.content_object
unsubscribe_function(user_profile)
return render_to_response('zerver/unsubscribe_success.html',
{"subscription_type": type,
"external_host": settings.EXTERNAL_HOST})
# Email unsubscribe functions. All have the function signature
# processor(user_profile).
def do_missedmessage_unsubscribe(user_profile):
do_change_enable_offline_email_notifications(user_profile, False)
def do_welcome_unsubscribe(user_profile):
clear_followup_emails_queue(user_profile.email)
def do_digest_unsubscribe(user_profile):
do_change_enable_digest_emails(user_profile, False)
# The keys are part of the URL for the unsubscribe link and must be valid
# without encoding.
# The values are a tuple of (display name, unsubscribe function), where the
# display name is what we call this class of email in user-visible text.
email_unsubscribers = {
"missed_messages": ("missed messages", do_missedmessage_unsubscribe),
"welcome": ("welcome", do_welcome_unsubscribe),
"digest": ("digest", do_digest_unsubscribe)
}
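# Illustrative flow (the URL shape is assumed, not taken from urls.py): a link
# such as .../accounts/unsubscribe/digest/<token> would call
# email_unsubscribe(request, "digest", <token>), which dispatches to
# do_digest_unsubscribe via the map above.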
# Login NOT required. These are for one-click unsubscribes.
def email_unsubscribe(request, type, token):
if type in email_unsubscribers:
display_name, unsubscribe_function = email_unsubscribers[type]
return process_unsubscribe(token, display_name, unsubscribe_function)
return render_to_response('zerver/unsubscribe_link_error.html', {},
context_instance=RequestContext(request))
|
BigDataforYou/movie_recommendation_workshop_1
|
refs/heads/master
|
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/numpy/distutils/setup.py
|
263
|
#!/usr/bin/env python
from __future__ import division, print_function
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('distutils', parent_package, top_path)
config.add_subpackage('command')
config.add_subpackage('fcompiler')
config.add_data_dir('tests')
config.add_data_files('site.cfg')
config.add_data_files('mingw/gfortran_vs2003_hack.c')
config.make_config_py()
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
ujenmr/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/elasticache_facts.py
|
27
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: elasticache_facts
short_description: Retrieve facts for AWS Elasticache clusters
description:
- Retrieve facts from AWS Elasticache clusters
version_added: "2.5"
options:
name:
description:
- The name of an Elasticache cluster
author:
- Will Thames (@willthames)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: obtain all Elasticache facts
elasticache_facts:
- name: obtain all facts for a single Elasticache cluster
elasticache_facts:
name: test_elasticache
'''
RETURN = '''
elasticache_clusters:
description: List of elasticache clusters
returned: always
type: complex
contains:
auto_minor_version_upgrade:
description: Whether to automatically upgrade to minor versions
returned: always
type: bool
sample: true
cache_cluster_create_time:
description: Date and time cluster was created
returned: always
type: str
sample: '2017-09-15T05:43:46.038000+00:00'
cache_cluster_id:
description: ID of the cache cluster
returned: always
type: str
sample: abcd-1234-001
cache_cluster_status:
description: Status of Elasticache cluster
returned: always
type: str
sample: available
cache_node_type:
description: Instance type of Elasticache nodes
returned: always
type: str
sample: cache.t2.micro
cache_nodes:
description: List of Elasticache nodes in the cluster
returned: always
type: complex
contains:
cache_node_create_time:
description: Date and time node was created
returned: always
type: str
sample: '2017-09-15T05:43:46.038000+00:00'
cache_node_id:
description: ID of the cache node
returned: always
type: str
sample: '0001'
cache_node_status:
description: Status of the cache node
returned: always
type: str
sample: available
customer_availability_zone:
description: Availability Zone in which the cache node was created
returned: always
type: str
sample: ap-southeast-2b
endpoint:
description: Connection details for the cache node
returned: always
type: complex
contains:
address:
description: URL of the cache node endpoint
returned: always
type: str
sample: abcd-1234-001.bgiz2p.0001.apse2.cache.amazonaws.com
port:
description: Port of the cache node endpoint
returned: always
type: int
sample: 6379
parameter_group_status:
description: Status of the Cache Parameter Group
returned: always
type: str
sample: in-sync
cache_parameter_group:
description: Contents of the Cache Parameter Group
returned: always
type: complex
contains:
cache_node_ids_to_reboot:
description: Cache nodes which need to be rebooted for parameter changes to be applied
returned: always
type: list
sample: []
cache_parameter_group_name:
description: Name of the cache parameter group
returned: always
type: str
sample: default.redis3.2
parameter_apply_status:
description: Status of parameter updates
returned: always
type: str
sample: in-sync
cache_security_groups:
description: Security Groups used by the cache
returned: always
type: list
sample:
- 'sg-abcd1234'
cache_subnet_group_name:
description: Elasticache Subnet Group used by the cache
returned: always
type: str
sample: abcd-subnet-group
client_download_landing_page:
description: URL of client download web page
returned: always
type: str
sample: 'https://console.aws.amazon.com/elasticache/home#client-download:'
engine:
description: Engine used by elasticache
returned: always
type: str
sample: redis
engine_version:
description: Version of elasticache engine
returned: always
type: str
sample: 3.2.4
notification_configuration:
description: Configuration of notifications
returned: if notifications are enabled
type: complex
contains:
topic_arn:
description: ARN of notification destination topic
returned: if notifications are enabled
type: str
sample: arn:aws:sns:*:123456789012:my_topic
topic_name:
description: Name of notification destination topic
returned: if notifications are enabled
type: str
sample: MyTopic
num_cache_nodes:
description: Number of Cache Nodes
returned: always
type: int
sample: 1
pending_modified_values:
description: Values that are pending modification
returned: always
type: complex
contains: {}
preferred_availability_zone:
description: Preferred Availability Zone
returned: always
type: str
sample: ap-southeast-2b
preferred_maintenance_window:
description: Time slot for preferred maintenance window
returned: always
type: str
sample: sat:12:00-sat:13:00
replication_group_id:
description: Replication Group Id
returned: always
type: str
sample: replication-001
security_groups:
description: List of Security Groups associated with Elasticache
returned: always
type: complex
contains:
security_group_id:
description: Security Group ID
returned: always
type: str
sample: sg-abcd1234
status:
description: Status of Security Group
returned: always
type: str
sample: active
tags:
description: Tags applied to the elasticache cluster
returned: always
type: complex
sample:
Application: web
Environment: test
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
@AWSRetry.exponential_backoff()
def describe_cache_clusters_with_backoff(client, cluster_id=None):
paginator = client.get_paginator('describe_cache_clusters')
params = dict(ShowCacheNodeInfo=True)
if cluster_id:
params['CacheClusterId'] = cluster_id
try:
response = paginator.paginate(**params).build_full_result()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'CacheClusterNotFound':
return []
raise
except botocore.exceptions.BotoCoreError:
raise
return response['CacheClusters']
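# Illustrative usage (mirrors get_elasticache_clusters below; `client` is a
# boto3 'elasticache' client and the cluster id is hypothetical):
#   clusters = describe_cache_clusters_with_backoff(client, cluster_id='abcd-1234')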
@AWSRetry.exponential_backoff()
def get_elasticache_tags_with_backoff(client, cluster_id):
return client.list_tags_for_resource(ResourceName=cluster_id)['TagList']
def get_aws_account_id(module):
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='sts',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Can't authorize connection")
try:
return client.get_caller_identity()['Account']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain AWS account id")
def get_elasticache_clusters(client, module, region):
try:
clusters = describe_cache_clusters_with_backoff(client, cluster_id=module.params.get('name'))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain cache cluster info")
account_id = get_aws_account_id(module)
results = []
for cluster in clusters:
cluster = camel_dict_to_snake_dict(cluster)
arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster['cache_cluster_id'])
try:
tags = get_elasticache_tags_with_backoff(client, arn)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't get tags for cluster %s")
cluster['tags'] = boto3_tag_list_to_ansible_dict(tags)
results.append(cluster)
return results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=False),
)
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='elasticache',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
module.exit_json(elasticache_clusters=get_elasticache_clusters(client, module, region))
if __name__ == '__main__':
main()
|
experiencecoin/experiencecoin
|
refs/heads/master
|
qa/rpc-tests/replace-by-fee.py
|
54
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test replace by fee code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return bytes_to_hex_str(tx.serialize())
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
#print (node.getbalance(), amount, fee)
new_addr = node.getnewaddress()
#print new_addr
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
#print i, txout['scriptPubKey']['addresses']
if txout['scriptPubKey']['addresses'] == [new_addr]:
#print i
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransaction(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert(new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug",
"-relaypriority=0", "-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"
]))
self.is_network_split = False
def run_test(self):
make_utxo(self.nodes[0], 1*COIN)
print("Running test simple doublespend...")
self.test_simple_doublespend()
print("Running test doublespend chain...")
self.test_doublespend_chain()
print("Running test doublespend tree...")
self.test_doublespend_tree()
print("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
print("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
print("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
print("Running test too many replacements...")
self.test_too_many_replacements()
print("Running test opt-in...")
self.test_opt_in()
print("Running test prioritised transactions...")
self.test_prioritised_transactions()
print("Passed\n")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# Extra 0.1 BTC fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 BTC - so this attempt is rejected.
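# Worked numbers for the check above (a sketch): the loop built 40
# transactions (remaining_value 49..10 BTC), each paying a 1 BTC fee, so a
# replacement must pay more than 40 BTC. dbl_tx below keeps a 20 BTC output
# from the 50 BTC input, i.e. only a 30 BTC fee, hence the rejection.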
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False) # transaction mistakenly accepted!
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert(len(tx.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
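# A sketch of branch() above: a depth-first generator that fans each output
# into tree_width children, yielding transactions until _total_txs reaches
# max_txs; with tree_width=5 a full tree would hold 1, 5, 25, ... txs per
# level, but the counter truncates it at max_txs.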
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
# 1 BTC fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
try:
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
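# Rough arithmetic for why this must fail (a sketch): tx1b pays ~1.099 BTC
# in fees (1.1 BTC in, 0.001 BTC out), more than tx1a's 0.1 BTC in absolute
# terms, but its ~999 kB output script drags the fee per KB far below tx1a's.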
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
else:
assert(False)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1_hex = txToHex(tx1)
tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
try:
tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
actual_fee = initial_nValue - split_value*(MAX_REPLACEMENT_LIMIT+1)
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
try:
self.nodes[0].sendrawtransaction(double_tx_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
assert_equal("too many potential replacements" in exp.error['message'], True)
else:
assert(False)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
""" Replacing should only work if orig tx opted in """
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
print(tx1b_txid)
assert(False)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx2b_hex = txToHex(tx2b)
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
try:
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(tx1a_txid, 0, int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert(tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
try:
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26)
else:
assert(False)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(tx2b.hash, 0, int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert(tx2b_txid in self.nodes[0].getrawmempool())
if __name__ == '__main__':
ReplaceByFeeTest().main()
|
TangHao1987/intellij-community
|
refs/heads/master
|
python/testData/completion/exportedConstants/a.after.py
|
83
|
from Xkinter import *
LEFT<caret>
|
Deisss/python-sockjsroom
|
refs/heads/master
|
sockjsroom/jsonParser.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple JSON encoder for MongoDB output: pretty JSON with datetime and ObjectId support
"""
try:
import json
except ImportError:
import simplejson as json
import datetime
Parser = None
# Limit import
__all__ = ["Parser"]
class DefaultJsonParser(json.JSONEncoder):
""" Create a basic JSON parser instance """
def default(self, obj):
""" Output data """
# Printer for datetime object
if isinstance(obj, datetime.datetime):
return obj.isoformat()
# Switch to default handler
return json.JSONEncoder.default(self, obj)
# Setting parser to default one
Parser = DefaultJsonParser
try:
import bson.objectid
# Import was a success, we add Mongo ObjectId compatibility
class MongoJsonParser(DefaultJsonParser):
""" Specific MongoDB manage ObjectId """
def default(self, obj):
""" Output data """
# Printer for MongoDB ObjectId
if isinstance(obj, bson.objectid.ObjectId):
return str(obj)
return DefaultJsonParser.default(self, obj)
# Switch parser to new mongo supported one
Parser = MongoJsonParser
except ImportError:
pass
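# Illustrative usage of the exported encoder (a sketch; the MongoDB variant
# assumes pymongo's bson is importable):
#   json.dumps({"when": datetime.datetime.utcnow()}, cls=Parser)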
|
OCForks/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
|
122
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import errno
import hashlib
import os
import re
from webkitpy.common.system import path
class MockFileSystem(object):
sep = '/'
pardir = '..'
def __init__(self, files=None, dirs=None, cwd='/'):
"""Initializes a "mock" filesystem that can be used to completely
stub out a filesystem.
Args:
files: a dict of filenames -> file contents. A file contents
value of None is used to indicate that the file should
not exist.
dirs: an optional collection of directories that exist even when
no files live under them.
cwd: the initial current working directory (defaults to '/').
"""
self.files = files or {}
self.written_files = {}
self.last_tmpdir = None
self.current_tmpno = 0
self.cwd = cwd
self.dirs = set(dirs or [])
self.dirs.add(cwd)
for f in self.files:
d = self.dirname(f)
while not d in self.dirs:
self.dirs.add(d)
d = self.dirname(d)
def clear_written_files(self):
# This function can be used to track what is written between steps in a test.
self.written_files = {}
def _raise_not_found(self, path):
raise IOError(errno.ENOENT, path, os.strerror(errno.ENOENT))
def _split(self, path):
# This is not quite a full implementation of os.path.split
# http://docs.python.org/library/os.path.html#os.path.split
if self.sep in path:
return path.rsplit(self.sep, 1)
return ('', path)
def abspath(self, path):
if os.path.isabs(path):
return self.normpath(path)
return self.abspath(self.join(self.cwd, path))
def realpath(self, path):
return self.abspath(path)
def basename(self, path):
return self._split(path)[1]
def expanduser(self, path):
if path[0] != "~":
return path
parts = path.split(self.sep, 1)
home_directory = self.sep + "Users" + self.sep + "mock"
if len(parts) == 1:
return home_directory
return home_directory + self.sep + parts[1]
def path_to_module(self, module_name):
return "/mock-checkout/Tools/Scripts/" + module_name.replace('.', '/') + ".py"
def chdir(self, path):
path = self.normpath(path)
if not self.isdir(path):
raise OSError(errno.ENOENT, path, os.strerror(errno.ENOENT))
self.cwd = path
def copyfile(self, source, destination):
if not self.exists(source):
self._raise_not_found(source)
if self.isdir(source):
raise IOError(errno.EISDIR, source, os.strerror(errno.EISDIR))
if self.isdir(destination):
raise IOError(errno.EISDIR, destination, os.strerror(errno.EISDIR))
if not self.exists(self.dirname(destination)):
raise IOError(errno.ENOENT, destination, os.strerror(errno.ENOENT))
self.files[destination] = self.files[source]
self.written_files[destination] = self.files[source]
def dirname(self, path):
return self._split(path)[0]
def exists(self, path):
return self.isfile(path) or self.isdir(path)
def files_under(self, path, dirs_to_skip=[], file_filter=None):
def filter_all(fs, dirpath, basename):
return True
file_filter = file_filter or filter_all
files = []
if self.isfile(path):
if file_filter(self, self.dirname(path), self.basename(path)) and self.files[path] is not None:
files.append(path)
return files
if self.basename(path) in dirs_to_skip:
return []
if not path.endswith(self.sep):
path += self.sep
dir_substrings = [self.sep + d + self.sep for d in dirs_to_skip]
for filename in self.files:
if not filename.startswith(path):
continue
suffix = filename[len(path) - 1:]
if any(dir_substring in suffix for dir_substring in dir_substrings):
continue
dirpath, basename = self._split(filename)
if file_filter(self, dirpath, basename) and self.files[filename] is not None:
files.append(filename)
return files
def getcwd(self):
return self.cwd
def glob(self, glob_string):
# FIXME: This handles '*', but not '?', '[', or ']'.
glob_string = re.escape(glob_string)
glob_string = glob_string.replace('\\*', '[^\\/]*') + '$'
glob_string = glob_string.replace('\\/', '/')
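# Worked example of the rewrite above (hypothetical input):
#   'foo/*.txt' -> r'foo/[^/]*\.txt$', matched against known files and dirs.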
path_filter = lambda path: re.match(glob_string, path)
# We could use fnmatch.fnmatch, but that might not do the right thing on windows.
existing_files = [path for path, contents in self.files.items() if contents is not None]
return filter(path_filter, existing_files) + filter(path_filter, self.dirs)
def isabs(self, path):
return path.startswith(self.sep)
def isfile(self, path):
return path in self.files and self.files[path] is not None
def isdir(self, path):
return self.normpath(path) in self.dirs
def _slow_but_correct_join(self, *comps):
return re.sub(re.escape(os.path.sep), self.sep, os.path.join(*comps))
def join(self, *comps):
# This function is called a lot, so we optimize it; there are
# unittests to check that we match _slow_but_correct_join(), above.
path = ''
sep = self.sep
for comp in comps:
if not comp:
continue
if comp[0] == sep:
path = comp
continue
if path:
path += sep
path += comp
if comps[-1] == '' and path:
path += '/'
path = path.replace(sep + sep, sep)
return path
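# Doctest-style sketch of the fast join() above:
#   join('a', 'b') -> 'a/b'; join('a', '/b') -> '/b' (absolute resets);
#   join('a', '') -> 'a/' (a trailing empty component keeps the slash).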
def listdir(self, path):
sep = self.sep
if not self.isdir(path):
raise OSError("%s is not a directory" % path)
if not path.endswith(sep):
path += sep
dirs = []
files = []
for f in self.files:
if self.exists(f) and f.startswith(path):
remaining = f[len(path):]
if sep in remaining:
dir = remaining[:remaining.index(sep)]
if not dir in dirs:
dirs.append(dir)
else:
files.append(remaining)
return dirs + files
def mtime(self, path):
if self.exists(path):
return 0
self._raise_not_found(path)
def _mktemp(self, suffix='', prefix='tmp', dir=None, **kwargs):
if dir is None:
dir = self.sep + '__im_tmp'
curno = self.current_tmpno
self.current_tmpno += 1
self.last_tmpdir = self.join(dir, '%s_%u_%s' % (prefix, curno, suffix))
return self.last_tmpdir
def mkdtemp(self, **kwargs):
class TemporaryDirectory(object):
def __init__(self, fs, **kwargs):
self._kwargs = kwargs
self._filesystem = fs
self._directory_path = fs._mktemp(**kwargs)
fs.maybe_make_directory(self._directory_path)
def __str__(self):
return self._directory_path
def __enter__(self):
return self._directory_path
def __exit__(self, type, value, traceback):
# Only self-delete if necessary.
# FIXME: Should we delete non-empty directories?
if self._filesystem.exists(self._directory_path):
self._filesystem.rmtree(self._directory_path)
return TemporaryDirectory(fs=self, **kwargs)
def maybe_make_directory(self, *path):
norm_path = self.normpath(self.join(*path))
while norm_path and not self.isdir(norm_path):
self.dirs.add(norm_path)
norm_path = self.dirname(norm_path)
def move(self, source, destination):
if self.files[source] is None:
self._raise_not_found(source)
self.files[destination] = self.files[source]
self.written_files[destination] = self.files[destination]
self.files[source] = None
self.written_files[source] = None
def _slow_but_correct_normpath(self, path):
return re.sub(re.escape(os.path.sep), self.sep, os.path.normpath(path))
def normpath(self, path):
# This function is called a lot, so we try to optimize the common cases
# instead of always calling _slow_but_correct_normpath(), above.
if '..' in path or '/./' in path:
# This doesn't happen very often; don't bother trying to optimize it.
return self._slow_but_correct_normpath(path)
if not path:
return '.'
if path == '/':
return path
if path == '/.':
return '/'
if path.endswith('/.'):
return path[:-2]
if path.endswith('/'):
return path[:-1]
return path
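# Doctest-style sketch of the fast paths above:
#   normpath('') -> '.', normpath('/a/') -> '/a', normpath('/.') -> '/';
#   anything containing '..' or '/./' falls back to the slow, correct path.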
def open_binary_tempfile(self, suffix=''):
path = self._mktemp(suffix)
return (WritableBinaryFileObject(self, path), path)
def open_binary_file_for_reading(self, path):
if self.files[path] is None:
self._raise_not_found(path)
return ReadableBinaryFileObject(self, path, self.files[path])
def read_binary_file(self, path):
# Intentionally raises KeyError if we don't recognize the path.
if self.files[path] is None:
self._raise_not_found(path)
return self.files[path]
def write_binary_file(self, path, contents):
# FIXME: should this assert if dirname(path) doesn't exist?
self.maybe_make_directory(self.dirname(path))
self.files[path] = contents
self.written_files[path] = contents
def open_text_file_for_reading(self, path):
if self.files[path] is None:
self._raise_not_found(path)
return ReadableTextFileObject(self, path, self.files[path])
def open_text_file_for_writing(self, path):
return WritableTextFileObject(self, path)
def read_text_file(self, path):
return self.read_binary_file(path).decode('utf-8')
def write_text_file(self, path, contents):
return self.write_binary_file(path, contents.encode('utf-8'))
def sha1(self, path):
contents = self.read_binary_file(path)
return hashlib.sha1(contents).hexdigest()
def relpath(self, path, start='.'):
# Since os.path.relpath() calls os.path.normpath()
# (see http://docs.python.org/library/os.path.html#os.path.abspath )
# it also removes trailing slashes and converts forward and backward
# slashes to the preferred slash os.sep.
start = self.abspath(start)
path = self.abspath(path)
if not path.lower().startswith(start.lower()):
# path is outside the directory given by start; compute path from root
return '../' * start.count('/') + path
rel_path = path[len(start):]
if not rel_path:
# Then the paths are the same.
pass
elif rel_path[0] == self.sep:
# It is probably sufficient to remove just the first character
# since os.path.normpath() collapses separators, but we use
# lstrip() just to be sure.
rel_path = rel_path.lstrip(self.sep)
else:
# We are in the case typified by the following example:
# path = "/tmp/foobar", start = "/tmp/foo" -> rel_path = "bar"
# FIXME: We return a less-than-optimal result here.
return '../' * start.count('/') + path
return rel_path
def remove(self, path):
if self.files[path] is None:
self._raise_not_found(path)
self.files[path] = None
self.written_files[path] = None
def rmtree(self, path):
path = self.normpath(path)
for f in self.files:
if f.startswith(path):
self.files[f] = None
self.dirs = set(filter(lambda d: not d.startswith(path), self.dirs))
def copytree(self, source, destination):
source = self.normpath(source)
destination = self.normpath(destination)
for source_file in self.files:
if source_file.startswith(source):
destination_path = self.join(destination, self.relpath(source_file, source))
self.maybe_make_directory(self.dirname(destination_path))
self.files[destination_path] = self.files[source_file]
def split(self, path):
idx = path.rfind(self.sep)
if idx == -1:
return ('', path)
return (path[:idx], path[(idx + 1):])
def splitext(self, path):
idx = path.rfind('.')
if idx == -1:
idx = len(path)
return (path[0:idx], path[idx:])
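# A minimal usage sketch (illustrative, not part of the original module):
#   fs = MockFileSystem(files={'/foo/bar.txt': 'contents'})
#   fs.exists('/foo/bar.txt')         -> True
#   fs.isdir('/foo')                  -> True (parent dirs inferred in __init__)
#   fs.read_text_file('/foo/bar.txt') -> u'contents'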
class WritableBinaryFileObject(object):
def __init__(self, fs, path):
self.fs = fs
self.path = path
self.closed = False
self.fs.files[path] = ""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.closed = True
def write(self, str):
self.fs.files[self.path] += str
self.fs.written_files[self.path] = self.fs.files[self.path]
class WritableTextFileObject(WritableBinaryFileObject):
def write(self, str):
WritableBinaryFileObject.write(self, str.encode('utf-8'))
class ReadableBinaryFileObject(object):
def __init__(self, fs, path, data):
self.fs = fs
self.path = path
self.closed = False
self.data = data
self.offset = 0
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.closed = True
def read(self, bytes=None):
if not bytes:
return self.data[self.offset:]
start = self.offset
self.offset += bytes
return self.data[start:self.offset]
class ReadableTextFileObject(ReadableBinaryFileObject):
def __init__(self, fs, path, data):
super(ReadableTextFileObject, self).__init__(fs, path, StringIO.StringIO(data.decode("utf-8")))
def close(self):
self.data.close()
super(ReadableTextFileObject, self).close()
def read(self, bytes=-1):
return self.data.read(bytes)
def readline(self, length=None):
return self.data.readline(length)
def __iter__(self):
return self.data.__iter__()
def next(self):
return self.data.next()
def seek(self, offset, whence=os.SEEK_SET):
self.data.seek(offset, whence)
|
okfde/froide-campaign
|
refs/heads/master
|
froide_campaign/migrations/0002_auto_20160123_1454.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('froide_campaign', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='informationobject',
options={'ordering': ('ordering',)},
),
migrations.AddField(
model_name='informationobject',
name='ordering',
field=models.CharField(max_length=255, blank=True),
),
migrations.AlterField(
model_name='campaign',
name='template',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='informationobject',
name='context',
field=models.JSONField(blank=True),
),
migrations.AlterField(
model_name='informationobject',
name='documents',
field=models.ManyToManyField(to='foirequest.FoiAttachment', blank=True),
),
migrations.AlterField(
model_name='informationobject',
name='foirequest',
field=models.ForeignKey(blank=True, to='foirequest.FoiRequest', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
migrations.AlterField(
model_name='informationobject',
name='publicbody',
field=models.ForeignKey(blank=True, to='publicbody.PublicBody', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
]
|
pozdnyakov/chromium-crosswalk
|
refs/heads/master
|
chrome/common/extensions/docs/server2/render_servlet.py
|
3
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from fnmatch import fnmatch
import logging
import mimetypes
import traceback
from urlparse import urlsplit
from branch_utility import BranchUtility
from file_system import FileNotFoundError
from servlet import Servlet, Response
import svn_constants
def _IsBinaryMimetype(mimetype):
return any(
mimetype.startswith(prefix) for prefix in ['audio', 'image', 'video'])
class RenderServlet(Servlet):
'''Servlet which renders templates.
'''
class Delegate(object):
def CreateServerInstanceForChannel(self, channel):
raise NotImplementedError()
def __init__(self, request, delegate, default_channel='stable'):
Servlet.__init__(self, request)
self._delegate = delegate
self._default_channel = default_channel
def Get(self):
''' Render the page for a request.
'''
headers = self._request.headers
channel, path = BranchUtility.SplitChannelNameFromPath(self._request.path)
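# Presumably splits a leading channel name off the path, e.g. (hypothetical)
# 'beta/extensions/foo' -> ('beta', 'extensions/foo'), or (None, path) when
# no channel prefix is present.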
if path.split('/')[-1] == 'redirects.json':
return Response.Ok('')
if channel == self._default_channel:
return Response.Redirect('/' + path)
if channel is None:
channel = self._default_channel
server_instance = self._delegate.CreateServerInstanceForChannel(channel)
redirect = server_instance.redirector.Redirect(self._request.host, path)
if redirect is not None:
if (channel != self._default_channel and
not urlsplit(redirect).scheme in ('http', 'https')):
redirect = '/%s%s' % (channel, redirect)
return Response.Redirect(redirect)
canonical_path = server_instance.path_canonicalizer.Canonicalize(path)
redirect = canonical_path.lstrip('/')
if path != redirect:
if channel is not None:
redirect = '%s/%s' % (channel, canonical_path)
return Response.Redirect('/' + redirect)
templates = server_instance.template_data_source_factory.Create(
self._request, path)
content = None
content_type = None
try:
if fnmatch(path, 'extensions/examples/*.zip'):
content = server_instance.example_zipper.Create(
path[len('extensions/'):-len('.zip')])
content_type = 'application/zip'
elif path.startswith('extensions/examples/'):
mimetype = mimetypes.guess_type(path)[0] or 'text/plain'
content = server_instance.content_cache.GetFromFile(
'%s/%s' % (svn_constants.DOCS_PATH, path[len('extensions/'):]),
binary=_IsBinaryMimetype(mimetype))
content_type = mimetype
elif path.startswith('static/'):
mimetype = mimetypes.guess_type(path)[0] or 'text/plain'
content = server_instance.content_cache.GetFromFile(
('%s/%s' % (svn_constants.DOCS_PATH, path)),
binary=_IsBinaryMimetype(mimetype))
content_type = mimetype
elif path.endswith('.html'):
content = templates.Render(path)
content_type = 'text/html'
except FileNotFoundError:
logging.warning(traceback.format_exc())
content = None
headers = {'x-frame-options': 'sameorigin'}
if content is None:
doc_class = path.split('/', 1)[0]
content = templates.Render('%s/404' % doc_class)
if not content:
content = templates.Render('extensions/404')
return Response.NotFound(content, headers=headers)
if not content:
logging.error('%s had empty content' % path)
headers.update({
'content-type': content_type,
'cache-control': 'max-age=300',
})
return Response.Ok(content, headers=headers)
|
ecatkins/instabilly
|
refs/heads/master
|
project/project/settings.py
|
1
|
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nrax)%x1$^8=l)1q3cxy99-m4dv)9olohyhq4ax^!g))tc8wpj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'spotify'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
from spotify.secret import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'spotify',
'USER': DB_USER,
'PASSWORD': DB_PASSWORD,
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
SESSION_COOKIE_SECURE = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
EMAIL_HOST = email
EMAIL_HOST_USER = email_host_user
EMAIL_HOST_PASSWORD = email_host_password
EMAIL_PORT = email_port
EMAIL_USE_TLS = email_use_tls
|
vrenaville/ngo-addons-backport
|
refs/heads/master
|
addons/l10n_si/__init__.py
|
439
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright: (C) 2012 - Mentis d.o.o., Dravograd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_wizard
|
cellnopt/cellnopt
|
refs/heads/master
|
test/core/test_base.py
|
1
|
from cno.core.base import CNORBase, CNOBase
from cno import cnodata
from easydev import TempFile
# To test some of the base functions, need to use something else such as cnorbool
def test_cnobase():
c = CNOBase(cnodata('PKN-ToyMMB.sif'), cnodata("MD-ToyMMB.csv"))
c.pknmodel
c.midas
c.data
c.preprocessing()
c.plot_pknmodel()
assert c._reac_cnor2cno(['A+B=C']) == ['A^B=C']
c.plot_midas()
c.plot_midas(xkcd=True)
c.config
fh = TempFile()
c.save_config_file(fh.name)
c = CNOBase(cnodata('PKN-ToyMMB.sif'), cnodata("MD-ToyMMB.csv"), config=fh.name)
try:
c.create_report()
assert False
except:
assert True
try:
c.create_report_images()
assert False
except:
assert True
from cno.boolean.cnorbool import CNORbool
def test_cnobase_with_cnorbool():
c = CNORbool(cnodata("PKN-ToyMMB.sif"), cnodata("MD-ToyMMB.csv"), verbose=True)
c.verboseR = True
c.verboseR = False
c.verbose = False
c.optimise(maxgens=5, popsize=10)
c.plot_fitness()
c.plot_model()
c.plot_optimised_model()
c.plot_mapback_model()
c._create_report_header()
c.onweb()
|
quantopian/PenguinDome
|
refs/heads/master
|
penguindome/client.py
|
1
|
# Quantopian, Inc. licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from functools import partial
import getpass
import json
import os
import requests
import sys
from tempfile import NamedTemporaryFile
from penguindome import (
load_settings,
get_setting as main_get_setting,
set_setting as main_set_setting,
get_logger as main_get_logger,
save_settings as main_save_settings,
get_selectors as main_get_selectors,
encrypt_document as main_encrypt_document,
client_gpg_version,
gpg_command as main_gpg_command,
top_dir,
)
gpg_command = partial(main_gpg_command, with_user_id=True,
minimum_version=client_gpg_version)
session = None
def get_setting(setting, default=None, check_defaults=True):
"""Fetch a setting from `client/settings.xml`
`setting` is a colon-separated list of keys and to transit to fetch the
desired setting. For example, `logging:handler` fetches the type Logbook
handler configured on the client.
`default` is the value to return if the setting does not exist.
`check_defaults` indicates whether `client/default-settings.yml` should be
checked if the specified setting isn't in `client/settings.xml`.
Returns None if the setting does not exist.
"""
return main_get_setting(load_settings('client'), setting, default,
check_defaults)
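# Illustrative call (setting name from the docstring, default hypothetical):
#   get_setting('logging:handler', default='stderr')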
def set_setting(setting, value):
return main_set_setting(load_settings('client'), setting, value)
def save_settings():
main_save_settings('client')
def get_logger(name):
return main_get_logger(get_setting, name, fail_to_local=True)
def get_selectors():
return main_get_selectors(get_setting)
def encrypt_document(*args, **kwargs):
return main_encrypt_document(get_setting, *args, **kwargs)
def server_request(cmd, data=None, data_path=None,
exit_on_connection_error=False, logger=None,
# Clients should never need to use these. They are for
# internal use on the server.
local_port=None, signed=True):
global session
if session is None:
session = requests.Session()
server_url = 'http://127.0.0.1:{}'.format(local_port) if local_port \
else get_setting('server_url')
if data and data_path:
raise Exception('Both data and data_path specified')
with NamedTemporaryFile('w+') as temp_data_file, \
NamedTemporaryFile('w+') as signature_file:
if data_path:
data = open(data_path).read()
else:
data = json.dumps(data)
temp_data_file.write(data)
temp_data_file.flush()
data_path = temp_data_file.name
post_data = {'data': data}
if signed:
gpg_command('--armor', '--detach-sign', '-o', signature_file.name,
data_path, log=logger)
signature_file.seek(0)
post_data['signature'] = signature_file.read()
kwargs = {
'data': post_data,
'timeout': 30,
}
if not local_port:
ca_path = get_setting('ssl:ca_path')
if ca_path:
if not ca_path.startswith('/'):
ca_path = os.path.join(top_dir, ca_path)
kwargs['verify'] = ca_path
try:
while True:
response = session.post('{}{}'.format(server_url, cmd), **kwargs)
if response.status_code == 401 and os.isatty(sys.stderr.fileno()):
username = input('Username:')
pw = getpass.getpass('Password:')
kwargs['auth'] = (username, pw)
continue
response.raise_for_status()
if 'auth' in kwargs and logger:
logger.info('Authenticated {} to {}', kwargs['auth'][0], cmd)
break
except (requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout):
if exit_on_connection_error:
sys.exit('Connection error posting to {}'.format(server_url))
raise
return response
|
FHannes/intellij-community
|
refs/heads/master
|
python/testData/codeInsight/controlflow/lambda.py
|
83
|
{(lambda i=i: i) for i in range(4)}
|
nikkitan/bitcoin
|
refs/heads/master
|
test/functional/feature_minchainwork.py
|
33
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
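# Worked arithmetic (a sketch): with node_min_work[1] = 101, genesis work 2
# and 2 units of work per block, this yields int((101 - 2) / 2) = 49 blocks,
# leaving the chain work at 2 + 49*2 = 100, just under node1's threshold.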
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generatetoaddress(num_blocks_to_generate,
self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
|
galarant/pixi_sandbox
|
refs/heads/master
|
backend/urls.py
|
1
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="index.html")),
url(r'^admin/', include(admin.site.urls)),
]
|
KevinMidboe/statusHandler
|
refs/heads/master
|
flask/lib/python3.4/keyword.py
|
162
|
#! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'False',
'None',
'True',
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'nonlocal',
'not',
'or',
'pass',
'raise',
'return',
'try',
'while',
'with',
'yield',
#--end keywords--
]
iskeyword = frozenset(kwlist).__contains__
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# load the output skeleton from the target, taking care to preserve its
# newline convention.
with open(optfile, newline='') as fp:
format = fp.readlines()
nl = format[0][len(format[0].strip()):] if format else '\n'
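# e.g. if format[0] is '#! /usr/bin/env python3\r\n', strip() drops the
# trailing '\r\n', so nl becomes '\r\n' (the file's newline convention).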
# scan the source file for keywords
with open(iptfile) as fp:
strprog = re.compile('"([^"]+)"')
lines = []
for line in fp:
if '{1, "' in line:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "'," + nl)
lines.sort()
# insert the lines of keywords into the skeleton
try:
start = format.index("#--start keywords--" + nl) + 1
end = format.index("#--end keywords--" + nl)
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
with open(optfile, 'w', newline='') as fp:
fp.writelines(format)
if __name__ == "__main__":
main()
|
heeraj123/oh-mainline
|
refs/heads/master
|
vendor/packages/django-debug-toolbar/debug_toolbar/panels/templates/panel.py
|
20
|
from __future__ import absolute_import, unicode_literals
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from os.path import normpath
from pprint import pformat
import django
from django import http
from django.conf import settings
from django.conf.urls import patterns, url
from django.db.models.query import QuerySet, RawQuerySet
from django.template import Context, RequestContext, Template
from django.template.context import get_standard_processors
from django.test.signals import template_rendered
from django.test.utils import instrumented_test_render
from django.utils.encoding import force_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import Panel
from debug_toolbar.panels.sql.tracking import recording, SQLQueryTriggered
# Monkey-patch to enable the template_rendered signal. The receiver returns
# immediately when the panel is disabled to keep the overhead small.
# Code taken and adapted from Simon Willison and Django Snippets:
# http://www.djangosnippets.org/snippets/766/
if Template._render != instrumented_test_render:
Template.original_render = Template._render
Template._render = instrumented_test_render
# Monkey-patch to store items added by template context processors. The
# overhead is sufficiently small to justify enabling it unconditionally.
def _request_context__init__(
self, request, dict_=None, processors=None, current_app=None,
use_l10n=None, use_tz=None):
Context.__init__(
self, dict_, current_app=current_app,
use_l10n=use_l10n, use_tz=use_tz)
if processors is None:
processors = ()
else:
processors = tuple(processors)
self.context_processors = OrderedDict()
updates = dict()
for processor in get_standard_processors() + processors:
name = '%s.%s' % (processor.__module__, processor.__name__)
context = processor(request)
self.context_processors[name] = context
updates.update(context)
self.update(updates)
RequestContext.__init__ = _request_context__init__
# Monkey-patch versions of Django where Template doesn't store origin.
# See https://code.djangoproject.com/ticket/16096.
if django.VERSION[:2] < (1, 7):
old_template_init = Template.__init__
def new_template_init(self, template_string, origin=None, name='<Unknown Template>'):
old_template_init(self, template_string, origin, name)
self.origin = origin
Template.__init__ = new_template_init
class TemplatesPanel(Panel):
"""
A panel that lists all templates used during processing of a response.
"""
def __init__(self, *args, **kwargs):
super(TemplatesPanel, self).__init__(*args, **kwargs)
self.templates = []
def _store_template_info(self, sender, **kwargs):
template, context = kwargs['template'], kwargs['context']
# Skip templates that we are generating through the debug toolbar.
if (isinstance(template.name, six.string_types) and
template.name.startswith('debug_toolbar/')):
return
context_list = []
for context_layer in context.dicts:
temp_layer = {}
if hasattr(context_layer, 'items'):
for key, value in context_layer.items():
# Replace any request elements - they have a large
# unicode representation and the request data is
# already made available from the Request panel.
if isinstance(value, http.HttpRequest):
temp_layer[key] = '<<request>>'
# Replace the debugging sql_queries element. The SQL
# data is already made available from the SQL panel.
elif key == 'sql_queries' and isinstance(value, list):
temp_layer[key] = '<<sql_queries>>'
# Replace LANGUAGES, which is available in i18n context processor
elif key == 'LANGUAGES' and isinstance(value, tuple):
temp_layer[key] = '<<languages>>'
# QuerySet would trigger the database: user can run the query from SQL Panel
elif isinstance(value, (QuerySet, RawQuerySet)):
model_name = "%s.%s" % (
value.model._meta.app_label, value.model.__name__)
temp_layer[key] = '<<%s of %s>>' % (
value.__class__.__name__.lower(), model_name)
else:
try:
recording(False)
pformat(value) # this MAY trigger a db query
except SQLQueryTriggered:
temp_layer[key] = '<<triggers database query>>'
except UnicodeEncodeError:
temp_layer[key] = '<<unicode encode error>>'
except Exception:
temp_layer[key] = '<<unhandled exception>>'
else:
temp_layer[key] = value
finally:
recording(True)
try:
context_list.append(pformat(temp_layer))
except UnicodeEncodeError:
pass
kwargs['context'] = [force_text(item) for item in context_list]
kwargs['context_processors'] = getattr(context, 'context_processors', None)
self.templates.append(kwargs)
# Implement the Panel API
nav_title = _("Templates")
@property
def title(self):
num_templates = len(self.templates)
return _("Templates (%(num_templates)s rendered)") % {'num_templates': num_templates}
@property
def nav_subtitle(self):
if self.templates:
return self.templates[0]['template'].name
return ''
template = 'debug_toolbar/panels/templates.html'
@classmethod
def get_urls(cls):
return patterns('debug_toolbar.panels.templates.views', # noqa
url(r'^template_source/$', 'template_source', name='template_source'),
)
def enable_instrumentation(self):
template_rendered.connect(self._store_template_info)
def disable_instrumentation(self):
template_rendered.disconnect(self._store_template_info)
def process_response(self, request, response):
template_context = []
for template_data in self.templates:
info = {}
# Clean up some info about templates
template = template_data.get('template', None)
if not hasattr(template, 'origin'):
continue
if template.origin and template.origin.name:
template.origin_name = template.origin.name
else:
template.origin_name = 'No origin'
info['template'] = template
# Clean up context for better readability
if self.toolbar.config['SHOW_TEMPLATE_CONTEXT']:
context_list = template_data.get('context', [])
info['context'] = '\n'.join(context_list)
template_context.append(info)
# Fetch context_processors from any template
if self.templates:
context_processors = self.templates[0]['context_processors']
else:
context_processors = None
self.record_stats({
'templates': template_context,
'template_dirs': [normpath(x) for x in settings.TEMPLATE_DIRS],
'context_processors': context_processors,
})
|
rfdougherty/dipy
|
refs/heads/master
|
dipy/viz/fvtk.py
|
5
|
''' Fvtk module implements simple visualization functions using VTK.
The main idea is the following:
A window can have one or more renderers. A renderer can have none, one or more actors. Examples of actors are a sphere, line, point etc.
You basically add actors to a renderer and in that way you can visualize the aforementioned objects e.g. sphere, line ...
Examples
---------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> a=fvtk.axes()
>>> fvtk.add(r,a)
>>> #fvtk.show(r)
For more information on VTK there are many neat examples at
http://www.vtk.org/Wiki/VTK/Tutorials/External_Tutorials
'''
from __future__ import division, print_function, absolute_import
from dipy.utils.six.moves import xrange
import types
import numpy as np
from dipy.core.ndindex import ndindex
# Conditional import machinery for vtk
from ..utils.optpkg import optional_package
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
cm, have_matplotlib, _ = optional_package('matplotlib.cm')
if have_matplotlib:
get_cmap = cm.get_cmap
else:
from dipy.data import get_cmap
# a track buffer used only with picking tracks
track_buffer = []
# indices buffer for the tracks
ind_buffer = []
# temporary renderer used only with picking tracks
tmp_ren = None
if have_vtk:
major_version = vtk.vtkVersion.GetVTKMajorVersion()
# Create a text mapper and actor to display the results of picking.
textMapper = vtk.vtkTextMapper()
tprop = textMapper.GetTextProperty()
tprop.SetFontFamilyToArial()
tprop.SetFontSize(10)
# tprop.BoldOn()
# tprop.ShadowOn()
tprop.SetColor(1, 0, 0)
textActor = vtk.vtkActor2D()
textActor.VisibilityOff()
textActor.SetMapper(textMapper)
# Create a cell picker.
picker = vtk.vtkCellPicker()
def ren():
'''Create a renderer.
Returns
-------
v : vtkRenderer() object
Renderer.
Examples
--------
>>> from dipy.viz import fvtk
>>> import numpy as np
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3)]
>>> c=fvtk.line(lines, fvtk.colors.red)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
'''
return vtk.vtkRenderer()
def add(ren, a):
''' Add a specific actor
'''
if isinstance(a, vtk.vtkVolume):
ren.AddVolume(a)
else:
ren.AddActor(a)
def rm(ren, a):
''' Remove a specific actor
'''
ren.RemoveActor(a)
def clear(ren):
''' Remove all actors from the renderer
'''
ren.RemoveAllViewProps()
def rm_all(ren):
''' Remove all actors from the renderer
'''
clear(ren)
def _arrow(pos=(0, 0, 0), color=(1, 0, 0), scale=(1, 1, 1), opacity=1):
''' Internal function for generating arrow actors.
'''
arrow = vtk.vtkArrowSource()
# arrow.SetTipLength(length)
arrowm = vtk.vtkPolyDataMapper()
if major_version <= 5:
arrowm.SetInput(arrow.GetOutput())
else:
arrowm.SetInputData(arrow.GetOutput())
arrowa = vtk.vtkActor()
arrowa.SetMapper(arrowm)
arrowa.GetProperty().SetColor(color)
arrowa.GetProperty().SetOpacity(opacity)
arrowa.SetScale(scale)
return arrowa
def axes(scale=(1, 1, 1), colorx=(1, 0, 0), colory=(0, 1, 0), colorz=(0, 0, 1),
opacity=1):
""" Create an actor with the coordinate's system axes where
red = x, green = y, blue =z.
Parameters
----------
scale : tuple (3,)
axes size e.g. (100, 100, 100)
colorx : tuple (3,)
x-axis color. Default red.
colory : tuple (3,)
        y-axis color. Default green.
    colorz : tuple (3,)
        z-axis color. Default blue.
Returns
-------
vtkAssembly
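    Examples
    --------
    A short sketch, mirroring the module-level example:
    >>> from dipy.viz import fvtk
    >>> r = fvtk.ren()
    >>> fvtk.add(r, fvtk.axes(scale=(10, 10, 10)))
    >>> #fvtk.show(r)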
"""
arrowx = _arrow(color=colorx, scale=scale, opacity=opacity)
arrowy = _arrow(color=colory, scale=scale, opacity=opacity)
arrowz = _arrow(color=colorz, scale=scale, opacity=opacity)
arrowy.RotateZ(90)
arrowz.RotateY(-90)
ass = vtk.vtkAssembly()
ass.AddPart(arrowx)
ass.AddPart(arrowy)
ass.AddPart(arrowz)
return ass
def _lookup(colors):
''' Internal function
Creates a lookup table with given colors.
Parameters
------------
colors : array, shape (N,3)
Colormap where every triplet is encoding red, green and blue e.g.
::
r1,g1,b1
r2,g2,b2
...
rN,gN,bN
where
::
0=<r<=1,
0=<g<=1,
0=<b<=1,
Returns
----------
vtkLookupTable
'''
colors = np.asarray(colors, dtype=np.float32)
if colors.ndim > 2:
raise ValueError('Incorrect shape of array in colors')
if colors.ndim == 1:
N = 1
if colors.ndim == 2:
N = colors.shape[0]
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(N)
lut.Build()
if colors.ndim == 2:
scalar = 0
for (r, g, b) in colors:
lut.SetTableValue(scalar, r, g, b, 1.0)
scalar += 1
if colors.ndim == 1:
lut.SetTableValue(0, colors[0], colors[1], colors[2], 1.0)
return lut
def streamtube(lines, colors, opacity=1, linewidth=0.15, tube_sides=8,
lod=True, lod_points=10 ** 4, lod_points_size=5):
""" Uses streamtubes to visualize polylines
Parameters
----------
lines : list
list of N curves represented as 2D ndarrays
colors : array (N, 3) or tuple (3,)
opacity : float
linewidth : float
tube_sides : int
lod : bool
use vtkLODActor rather than vtkActor
lod_points : int
number of points to be used when LOD is in effect
lod_points_size : int
size of points when lod is in effect
Examples
--------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10, 3), np.random.rand(20, 3)]
>>> colors=np.random.rand(2, 3)
>>> c=fvtk.streamtube(lines, colors)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
Notes
-----
    Streamtubes can be heavy on the GPU when many streamlines are loaded, so
    you may experience slow rendering times depending on your system's GPU. A
    solution to this problem is to reduce the number of points in each
    streamline. In dipy we provide an algorithm that reduces the number of
    points on the straighter parts of the streamline while keeping more
    points on the curvier parts. It can be used in the following way
from dipy.tracking.distances import approx_polygon_track
lines = [approx_polygon_track(line, 0.2) for line in lines]
"""
points = vtk.vtkPoints()
colors = np.asarray(colors)
if colors.ndim == 1:
colors = np.tile(colors, (len(lines), 1))
# Create the polyline.
streamlines = vtk.vtkCellArray()
cols = vtk.vtkUnsignedCharArray()
cols.SetName("Cols")
cols.SetNumberOfComponents(3)
len_lines = len(lines)
prior_line_shape = 0
for i in range(len_lines):
line = lines[i]
streamlines.InsertNextCell(line.shape[0])
for j in range(line.shape[0]):
points.InsertNextPoint(*line[j])
streamlines.InsertCellPoint(j + prior_line_shape)
color = (255 * colors[i]).astype('ubyte')
cols.InsertNextTuple3(*color)
prior_line_shape += line.shape[0]
profileData = vtk.vtkPolyData()
profileData.SetPoints(points)
profileData.SetLines(streamlines)
profileData.GetPointData().AddArray(cols)
# Add thickness to the resulting line.
profileTubes = vtk.vtkTubeFilter()
profileTubes.SetNumberOfSides(tube_sides)
if major_version <= 5:
profileTubes.SetInput(profileData)
else:
profileTubes.SetInputData(profileData)
#profileTubes.SetInput(profileData)
profileTubes.SetRadius(linewidth)
profileMapper = vtk.vtkPolyDataMapper()
profileMapper.SetInputConnection(profileTubes.GetOutputPort())
profileMapper.ScalarVisibilityOn()
profileMapper.SetScalarModeToUsePointFieldData()
profileMapper.SelectColorArray("Cols")
profileMapper.GlobalImmediateModeRenderingOn()
if lod:
profile = vtk.vtkLODActor()
profile.SetNumberOfCloudPoints(lod_points)
profile.GetProperty().SetPointSize(lod_points_size)
else:
profile = vtk.vtkActor()
profile.SetMapper(profileMapper)
profile.GetProperty().SetAmbient(0) # .3
profile.GetProperty().SetSpecular(0) # .3
profile.GetProperty().SetSpecularPower(10)
profile.GetProperty().SetInterpolationToGouraud()
profile.GetProperty().BackfaceCullingOn()
profile.GetProperty().SetOpacity(opacity)
return profile
def line(lines, colors, opacity=1, linewidth=1):
''' Create an actor for one or more lines.
Parameters
------------
lines : list of arrays representing lines as 3d points for example
lines=[np.random.rand(10,3),np.random.rand(20,3)]
represents 2 lines the first with 10 points and the second with 20 points in x,y,z coordinates.
colors : array, shape (N,3)
Colormap where every triplet is encoding red, green and blue e.g.
::
r1,g1,b1
r2,g2,b2
...
rN,gN,bN
where
::
0=<r<=1,
0=<g<=1,
0=<b<=1
opacity : float, optional
``0 <= transparency <= 1``
linewidth : float, optional
Line thickness.
Returns
----------
v : vtkActor object
Line.
Examples
----------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3), np.random.rand(20,3)]
>>> colors=np.random.rand(2,3)
>>> c=fvtk.line(lines, colors)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
'''
    if not isinstance(lines, list):
lines = [lines]
points = vtk.vtkPoints()
lines_ = vtk.vtkCellArray()
linescalars = vtk.vtkFloatArray()
# lookuptable=vtk.vtkLookupTable()
lookuptable = _lookup(colors)
scalarmin = 0
colors = np.asarray(colors)
if colors.ndim == 2:
scalarmax = colors.shape[0] - 1
if colors.ndim == 1:
scalarmax = 0
curPointID = 0
m = (0.0, 0.0, 0.0)
n = (1.0, 0.0, 0.0)
scalar = 0
    # In the many-colors case (colors.ndim == 2) every line gets its own
    # scalar; with a single color (colors.ndim == 1) the scalar stays 0.
    for Line in lines:
        inw = True
        mit = iter(Line)
        nit = iter(Line)
        next(nit)
        while(inw):
            try:
                m = next(mit)
                n = next(nit)
                linescalars.SetNumberOfComponents(1)
                points.InsertNextPoint(m)
                linescalars.InsertNextTuple1(scalar)
                points.InsertNextPoint(n)
                linescalars.InsertNextTuple1(scalar)
                lines_.InsertNextCell(2)
                lines_.InsertCellPoint(curPointID)
                lines_.InsertCellPoint(curPointID + 1)
                curPointID += 2
            except StopIteration:
                break
        if colors.ndim == 2:
            scalar += 1
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines_)
polydata.GetPointData().SetScalars(linescalars)
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
mapper.SetLookupTable(lookuptable)
mapper.SetColorModeToMapScalars()
mapper.SetScalarRange(scalarmin, scalarmax)
mapper.SetScalarModeToUsePointData()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(linewidth)
actor.GetProperty().SetOpacity(opacity)
return actor
def dots(points, color=(1, 0, 0), opacity=1, dot_size=5):
""" Create one or more 3d points
Parameters
----------
points : ndarray, (N, 3)
color : tuple (3,)
opacity : float
dot_size : int
Returns
--------
vtkActor
See Also
---------
dipy.viz.fvtk.point
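    Examples
    --------
    A minimal sketch, following the pattern of the other fvtk examples:
    >>> import numpy as np
    >>> from dipy.viz import fvtk
    >>> pts = np.random.rand(5, 3)
    >>> r = fvtk.ren()
    >>> fvtk.add(r, fvtk.dots(pts, color=(0, 1, 0)))
    >>> #fvtk.show(r)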
"""
if points.ndim == 2:
points_no = points.shape[0]
else:
points_no = 1
polyVertexPoints = vtk.vtkPoints()
polyVertexPoints.SetNumberOfPoints(points_no)
aPolyVertex = vtk.vtkPolyVertex()
aPolyVertex.GetPointIds().SetNumberOfIds(points_no)
cnt = 0
if points.ndim > 1:
for point in points:
polyVertexPoints.InsertPoint(cnt, point[0], point[1], point[2])
aPolyVertex.GetPointIds().SetId(cnt, cnt)
cnt += 1
else:
polyVertexPoints.InsertPoint(cnt, points[0], points[1], points[2])
aPolyVertex.GetPointIds().SetId(cnt, cnt)
cnt += 1
aPolyVertexGrid = vtk.vtkUnstructuredGrid()
aPolyVertexGrid.Allocate(1, 1)
aPolyVertexGrid.InsertNextCell(aPolyVertex.GetCellType(),
aPolyVertex.GetPointIds())
aPolyVertexGrid.SetPoints(polyVertexPoints)
aPolyVertexMapper = vtk.vtkDataSetMapper()
if major_version <= 5:
aPolyVertexMapper.SetInput(aPolyVertexGrid)
else:
aPolyVertexMapper.SetInputData(aPolyVertexGrid)
aPolyVertexActor = vtk.vtkActor()
aPolyVertexActor.SetMapper(aPolyVertexMapper)
aPolyVertexActor.GetProperty().SetColor(color)
aPolyVertexActor.GetProperty().SetOpacity(opacity)
aPolyVertexActor.GetProperty().SetPointSize(dot_size)
return aPolyVertexActor
def point(points, colors, opacity=1, point_radius=0.1, theta=8, phi=8):
""" Visualize points as sphere glyphs
Parameters
----------
points : ndarray, shape (N, 3)
colors : ndarray (N,3) or tuple (3,)
point_radius : float
theta : int
phi : int
Returns
-------
vtkActor
Examples
--------
>>> from dipy.viz import fvtk
>>> ren = fvtk.ren()
>>> pts = np.random.rand(5, 3)
>>> point_actor = fvtk.point(pts, fvtk.colors.coral)
>>> fvtk.add(ren, point_actor)
>>> #fvtk.show(ren)
"""
if np.array(colors).ndim == 1:
# return dots(points,colors,opacity)
colors = np.tile(colors, (len(points), 1))
scalars = vtk.vtkUnsignedCharArray()
scalars.SetNumberOfComponents(3)
pts = vtk.vtkPoints()
cnt_colors = 0
for p in points:
pts.InsertNextPoint(p[0], p[1], p[2])
        scalars.InsertNextTuple3(round(255 * colors[cnt_colors][0]),
                                 round(255 * colors[cnt_colors][1]),
                                 round(255 * colors[cnt_colors][2]))
cnt_colors += 1
src = vtk.vtkSphereSource()
src.SetRadius(point_radius)
src.SetThetaResolution(theta)
src.SetPhiResolution(phi)
polyData = vtk.vtkPolyData()
polyData.SetPoints(pts)
polyData.GetPointData().SetScalars(scalars)
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(src.GetOutputPort())
if major_version <= 5:
glyph.SetInput(polyData)
else:
glyph.SetInputData(polyData)
glyph.SetColorModeToColorByScalar()
glyph.SetScaleModeToDataScalingOff()
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(glyph.GetOutput())
else:
mapper.SetInputData(glyph.GetOutput())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetOpacity(opacity)
return actor
def label(ren, text='Origin', pos=(0, 0, 0), scale=(0.2, 0.2, 0.2),
color=(1, 1, 1)):
''' Create a label actor.
This actor will always face the camera
Parameters
----------
ren : vtkRenderer() object
Renderer as returned by ``ren()``.
text : str
Text for the label.
pos : (3,) array_like, optional
Left down position of the label.
scale : (3,) array_like
Changes the size of the label.
color : (3,) array_like
Label color as ``(r,g,b)`` tuple.
Returns
-------
l : vtkActor object
Label.
Examples
--------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> l=fvtk.label(r)
>>> fvtk.add(r,l)
>>> #fvtk.show(r)
'''
atext = vtk.vtkVectorText()
atext.SetText(text)
textm = vtk.vtkPolyDataMapper()
if major_version <= 5:
textm.SetInput(atext.GetOutput())
else:
textm.SetInputData(atext.GetOutput())
texta = vtk.vtkFollower()
texta.SetMapper(textm)
texta.SetScale(scale)
texta.GetProperty().SetColor(color)
texta.SetPosition(pos)
ren.AddActor(texta)
texta.SetCamera(ren.GetActiveCamera())
return texta
def volume(vol, voxsz=(1.0, 1.0, 1.0), affine=None, center_origin=1,
info=0, maptype=0, trilinear=1, iso=0, iso_thr=100,
opacitymap=None, colormap=None):
''' Create a volume and return a volumetric actor using volumetric
rendering.
This function has many different interesting capabilities. The maptype,
opacitymap and colormap are the most crucial parameters here.
Parameters
----------
vol : array, shape (N, M, K), dtype uint8
An array representing the volumetric dataset that we want to visualize
using volumetric rendering.
voxsz : (3,) array_like
Voxel size.
affine : (4, 4) ndarray
As given by volumeimages.
center_origin : int {0,1}
It considers that the center of the volume is the
point ``(-vol.shape[0]/2.0+0.5,-vol.shape[1]/2.0+0.5,-vol.shape[2]/2.0+0.5)``.
info : int {0,1}
If 1 it prints out some info about the volume, the method and the
dataset.
trilinear : int {0,1}
Use trilinear interpolation, default 1, gives smoother rendering. If
you want faster interpolation use 0 (Nearest).
maptype : int {0,1}
        The maptype determines which VTK mapper, and hence which raycasting
        algorithm, is used for the rendering.
The options are:
If 0 then vtkVolumeTextureMapper2D is used.
If 1 then vtkVolumeRayCastFunction is used.
iso : int {0,1}
If iso is 1 and maptype is 1 then we use
``vtkVolumeRayCastIsosurfaceFunction`` which generates an isosurface at
the predefined iso_thr value. If iso is 0 and maptype is 1
``vtkVolumeRayCastCompositeFunction`` is used.
iso_thr : int
        If iso is 1 then this threshold in the volume defines the value
which will be used to create the isosurface.
opacitymap : (2, 2) ndarray
The opacity map assigns a transparency coefficient to every point in
the volume. The default value uses the histogram of the volume to
calculate the opacitymap.
colormap : (4, 4) ndarray
The color map assigns a color value to every point in the volume.
        When None, a red-blue colormap based on the volume's histogram is used.
Returns
-------
v : vtkVolume
Volume.
Notes
--------
    What is the difference between TextureMapper2D and RayCastFunction? Coming
    soon... See the VTK User's Guide [book], The Visualization Toolkit [book]
    and VTK's online documentation.
    What is the difference between RayCastIsosurfaceFunction and
    RayCastCompositeFunction? Coming soon... See the VTK User's Guide [book],
    The Visualization Toolkit [book] and VTK's online documentation.
What about trilinear interpolation?
Coming soon... well when time permits really ... :-)
Examples
--------
First example random points.
>>> from dipy.viz import fvtk
>>> import numpy as np
>>> vol=100*np.random.rand(100,100,100)
>>> vol=vol.astype('uint8')
>>> vol.min(), vol.max()
(0, 99)
>>> r = fvtk.ren()
>>> v = fvtk.volume(vol)
>>> fvtk.add(r,v)
>>> #fvtk.show(r)
Second example with a more complicated function
>>> from dipy.viz import fvtk
>>> import numpy as np
>>> x, y, z = np.ogrid[-10:10:20j, -10:10:20j, -10:10:20j]
>>> s = np.sin(x*y*z)/(x*y*z)
>>> r = fvtk.ren()
>>> v = fvtk.volume(s)
>>> fvtk.add(r,v)
>>> #fvtk.show(r)
If you find this function too complicated you can always use mayavi.
Please do not forget to use the -wthread switch in ipython if you are
running mayavi.
from enthought.mayavi import mlab
import numpy as np
x, y, z = np.ogrid[-10:10:20j, -10:10:20j, -10:10:20j]
s = np.sin(x*y*z)/(x*y*z)
mlab.pipeline.volume(mlab.pipeline.scalar_field(s))
mlab.show()
More mayavi demos are available here:
http://code.enthought.com/projects/mayavi/docs/development/html/mayavi/mlab.html
'''
if vol.ndim != 3:
raise ValueError('3d numpy arrays only please')
if info:
print('Datatype', vol.dtype, 'converted to uint8')
vol = np.interp(vol, [vol.min(), vol.max()], [0, 255])
vol = vol.astype('uint8')
if opacitymap is None:
bin, res = np.histogram(vol.ravel())
res2 = np.interp(res, [vol.min(), vol.max()], [0, 1])
opacitymap = np.vstack((res, res2)).T
opacitymap = opacitymap.astype('float32')
'''
opacitymap=np.array([[ 0.0, 0.0],
[50.0, 0.9]])
'''
if info:
print('opacitymap', opacitymap)
if colormap is None:
bin, res = np.histogram(vol.ravel())
res2 = np.interp(res, [vol.min(), vol.max()], [0, 1])
zer = np.zeros(res2.shape)
colormap = np.vstack((res, res2, zer, res2[::-1])).T
colormap = colormap.astype('float32')
'''
colormap=np.array([[0.0, 0.5, 0.0, 0.0],
[64.0, 1.0, 0.5, 0.5],
[128.0, 0.9, 0.2, 0.3],
[196.0, 0.81, 0.27, 0.1],
[255.0, 0.5, 0.5, 0.5]])
'''
if info:
print('colormap', colormap)
im = vtk.vtkImageData()
if major_version <= 5:
im.SetScalarTypeToUnsignedChar()
im.SetDimensions(vol.shape[0], vol.shape[1], vol.shape[2])
# im.SetOrigin(0,0,0)
# im.SetSpacing(voxsz[2],voxsz[0],voxsz[1])
if major_version <= 5:
im.AllocateScalars()
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
for i in range(vol.shape[0]):
for j in range(vol.shape[1]):
for k in range(vol.shape[2]):
im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
if affine is not None:
aff = vtk.vtkMatrix4x4()
        aff.DeepCopy((affine[0, 0], affine[0, 1], affine[0, 2], affine[0, 3],
                      affine[1, 0], affine[1, 1], affine[1, 2], affine[1, 3],
                      affine[2, 0], affine[2, 1], affine[2, 2], affine[2, 3],
                      affine[3, 0], affine[3, 1], affine[3, 2], affine[3, 3]))
# aff.DeepCopy((affine[0,0],affine[0,1],affine[0,2],0,affine[1,0],affine[1,1],affine[1,2],0,affine[2,0],affine[2,1],affine[2,2],0,affine[3,0],affine[3,1],affine[3,2],1))
# aff.DeepCopy((affine[0,0],affine[0,1],affine[0,2],127.5,affine[1,0],affine[1,1],affine[1,2],-127.5,affine[2,0],affine[2,1],affine[2,2],-127.5,affine[3,0],affine[3,1],affine[3,2],1))
reslice = vtk.vtkImageReslice()
if major_version <= 5:
reslice.SetInput(im)
else:
reslice.SetInputData(im)
# reslice.SetOutputDimensionality(2)
# reslice.SetOutputOrigin(127,-145,147)
reslice.SetResliceAxes(aff)
# reslice.SetOutputOrigin(-127,-127,-127)
# reslice.SetOutputExtent(-127,128,-127,128,-127,128)
# reslice.SetResliceAxesOrigin(0,0,0)
# print 'Get Reslice Axes Origin ', reslice.GetResliceAxesOrigin()
# reslice.SetOutputSpacing(1.0,1.0,1.0)
reslice.SetInterpolationModeToLinear()
# reslice.UpdateWholeExtent()
# print 'reslice GetOutputOrigin', reslice.GetOutputOrigin()
# print 'reslice GetOutputExtent',reslice.GetOutputExtent()
# print 'reslice GetOutputSpacing',reslice.GetOutputSpacing()
changeFilter = vtk.vtkImageChangeInformation()
if major_version <= 5:
changeFilter.SetInput(reslice.GetOutput())
else:
changeFilter.SetInputData(reslice.GetOutput())
# changeFilter.SetInput(im)
if center_origin:
changeFilter.SetOutputOrigin(
-vol.shape[0] / 2.0 + 0.5, -vol.shape[1] / 2.0 + 0.5, -vol.shape[2] / 2.0 + 0.5)
print('ChangeFilter ', changeFilter.GetOutputOrigin())
opacity = vtk.vtkPiecewiseFunction()
for i in range(opacitymap.shape[0]):
opacity.AddPoint(opacitymap[i, 0], opacitymap[i, 1])
color = vtk.vtkColorTransferFunction()
for i in range(colormap.shape[0]):
color.AddRGBPoint(
colormap[i, 0], colormap[i, 1], colormap[i, 2], colormap[i, 3])
if(maptype == 0):
property = vtk.vtkVolumeProperty()
property.SetColor(color)
property.SetScalarOpacity(opacity)
if trilinear:
property.SetInterpolationTypeToLinear()
else:
property.SetInterpolationTypeToNearest()
if info:
print('mapper VolumeTextureMapper2D')
mapper = vtk.vtkVolumeTextureMapper2D()
if affine is None:
if major_version <= 5:
mapper.SetInput(im)
else:
mapper.SetInputData(im)
else:
if major_version <= 5:
mapper.SetInput(changeFilter.GetOutput())
else:
mapper.SetInputData(changeFilter.GetOutput())
if (maptype == 1):
property = vtk.vtkVolumeProperty()
property.SetColor(color)
property.SetScalarOpacity(opacity)
property.ShadeOn()
if trilinear:
property.SetInterpolationTypeToLinear()
else:
property.SetInterpolationTypeToNearest()
if iso:
isofunc = vtk.vtkVolumeRayCastIsosurfaceFunction()
isofunc.SetIsoValue(iso_thr)
else:
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
if info:
print('mapper VolumeRayCastMapper')
mapper = vtk.vtkVolumeRayCastMapper()
if iso:
mapper.SetVolumeRayCastFunction(isofunc)
if info:
print('Isosurface')
else:
mapper.SetVolumeRayCastFunction(compositeFunction)
# mapper.SetMinimumImageSampleDistance(0.2)
if info:
print('Composite')
if affine is None:
if major_version <= 5:
mapper.SetInput(im)
else:
mapper.SetInputData(im)
else:
# mapper.SetInput(reslice.GetOutput())
if major_version <= 5:
mapper.SetInput(changeFilter.GetOutput())
else:
mapper.SetInputData(changeFilter.GetOutput())
# Return mid position in world space
# im2=reslice.GetOutput()
# index=im2.FindPoint(vol.shape[0]/2.0,vol.shape[1]/2.0,vol.shape[2]/2.0)
# print 'Image Getpoint ' , im2.GetPoint(index)
volum = vtk.vtkVolume()
volum.SetMapper(mapper)
volum.SetProperty(property)
if info:
print('Origin', volum.GetOrigin())
print('Orientation', volum.GetOrientation())
print('OrientationW', volum.GetOrientationWXYZ())
print('Position', volum.GetPosition())
print('Center', volum.GetCenter())
print('Get XRange', volum.GetXRange())
print('Get YRange', volum.GetYRange())
print('Get ZRange', volum.GetZRange())
print('Volume data type', vol.dtype)
return volum
def contour(vol, voxsz=(1.0, 1.0, 1.0), affine=None, levels=[50],
colors=[np.array([1.0, 0.0, 0.0])], opacities=[0.5]):
""" Take a volume and draw surface contours for any any number of
thresholds (levels) where every contour has its own color and opacity
Parameters
----------
vol : (N, M, K) ndarray
An array representing the volumetric dataset for which we will draw
        some beautiful contours.
voxsz : (3,) array_like
Voxel size.
affine : None
Not used.
levels : array_like
        Sequence of thresholds for the contours, taken from the image values;
        they need to be of the same datatype as `vol`.
colors : (N, 3) ndarray
RGB values in [0,1].
opacities : array_like
Opacities of contours.
Returns
-------
vtkAssembly
Examples
--------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> A=np.zeros((10,10,10))
>>> A[3:-3,3:-3,3:-3]=1
>>> r=fvtk.ren()
>>> fvtk.add(r,fvtk.contour(A,levels=[1]))
>>> #fvtk.show(r)
"""
im = vtk.vtkImageData()
if major_version <= 5:
im.SetScalarTypeToUnsignedChar()
im.SetDimensions(vol.shape[0], vol.shape[1], vol.shape[2])
# im.SetOrigin(0,0,0)
# im.SetSpacing(voxsz[2],voxsz[0],voxsz[1])
if major_version <= 5:
im.AllocateScalars()
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
for i in range(vol.shape[0]):
for j in range(vol.shape[1]):
for k in range(vol.shape[2]):
im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
ass = vtk.vtkAssembly()
# ass=[]
for (i, l) in enumerate(levels):
# print levels
skinExtractor = vtk.vtkContourFilter()
if major_version <= 5:
skinExtractor.SetInput(im)
else:
skinExtractor.SetInputData(im)
skinExtractor.SetValue(0, l)
skinNormals = vtk.vtkPolyDataNormals()
skinNormals.SetInputConnection(skinExtractor.GetOutputPort())
skinNormals.SetFeatureAngle(60.0)
skinMapper = vtk.vtkPolyDataMapper()
skinMapper.SetInputConnection(skinNormals.GetOutputPort())
skinMapper.ScalarVisibilityOff()
skin = vtk.vtkActor()
skin.SetMapper(skinMapper)
skin.GetProperty().SetOpacity(opacities[i])
# print colors[i]
skin.GetProperty().SetColor(colors[i][0], colors[i][1], colors[i][2])
# skin.Update()
ass.AddPart(skin)
del skin
del skinMapper
del skinExtractor
return ass
lowercase_cm_name = {'blues':'Blues', 'accent':'Accent'}
def create_colormap(v, name='jet', auto=True):
"""Create colors from a specific colormap and return it
as an array of shape (N,3) where every row gives the corresponding
    r,g,b value. The colormaps we use are similar to those of pylab.
Parameters
----------
v : (N,) array
vector of values to be mapped in RGB colors according to colormap
name : str.
Name of the colormap. Currently implemented: 'jet', 'blues',
'accent', 'bone' and matplotlib colormaps if you have matplotlib
installed.
auto : bool,
        if auto is True then v is interpolated to [0, 1] from v.min()
        to v.max()
Notes
-----
Dipy supports a few colormaps for those who do not use Matplotlib, for
more colormaps consider downloading Matplotlib.
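    Examples
    --------
    A small illustrative sketch (checking only the output shape):
    >>> import numpy as np
    >>> from dipy.viz import fvtk
    >>> v = np.linspace(0., 10., 5)
    >>> rgb = fvtk.create_colormap(v, 'jet')
    >>> rgb.shape == (5, 3)
    True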
"""
if v.ndim > 1:
msg = 'This function works only with 1d arrays. Use ravel()'
raise ValueError(msg)
if auto:
v = np.interp(v, [v.min(), v.max()], [0, 1])
else:
v = np.clip(v, 0, 1)
# For backwards compatibility with lowercase names
newname = lowercase_cm_name.get(name) or name
colormap = get_cmap(newname)
if colormap is None:
e_s = "Colormap '%s' is not yet implemented " % name
raise ValueError(e_s)
rgba = colormap(v)
rgb = rgba[:, :3].copy()
return rgb
def _makeNd(array, ndim):
"""Pads as many 1s at the beginning of array's shape as are need to give
array ndim dimensions."""
new_shape = (1,) * (ndim - array.ndim) + array.shape
return array.reshape(new_shape)
def sphere_funcs(sphere_values, sphere, image=None, colormap='jet',
scale=2.2, norm=True, radial_scale=True):
"""Plot many morphed spherical functions simultaneously.
Parameters
----------
sphere_values : (M,) or (X, M) or (X, Y, M) or (X, Y, Z, M) ndarray
Values on the sphere.
sphere : Sphere
image : None,
Not yet supported.
colormap : None or 'jet'
If None then no color is used.
scale : float,
Distance between spheres.
norm : bool,
Normalize `sphere_values`.
radial_scale : bool,
Scale sphere points according to odf values.
Returns
-------
actor : vtkActor
Spheres.
Examples
--------
>>> from dipy.viz import fvtk
>>> r = fvtk.ren()
>>> odfs = np.ones((5, 5, 724))
>>> odfs[..., 0] = 2.
>>> from dipy.data import get_sphere
>>> sphere = get_sphere('symmetric724')
>>> fvtk.add(r, fvtk.sphere_funcs(odfs, sphere))
>>> #fvtk.show(r)
"""
sphere_values = np.asarray(sphere_values)
if sphere_values.ndim > 4:
raise ValueError("Wrong shape")
sphere_values = _makeNd(sphere_values, 4)
grid_shape = np.array(sphere_values.shape[:3])
faces = np.asarray(sphere.faces, dtype=int)
vertices = sphere.vertices
if sphere_values.shape[-1] != sphere.vertices.shape[0]:
msg = 'Sphere.vertices.shape[0] should be the same as the '
        msg += 'last dimension of sphere_values, i.e. sphere_values.shape[-1]'
raise ValueError(msg)
list_sq = []
list_cols = []
for ijk in np.ndindex(*grid_shape):
m = sphere_values[ijk].copy()
if norm:
m /= abs(m).max()
if radial_scale:
xyz = vertices.T * m
else:
xyz = vertices.T.copy()
xyz += scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
list_sq.append(xyz)
if colormap is not None:
cols = create_colormap(m, colormap)
cols = np.interp(cols, [0, 1], [0, 255]).astype('ubyte')
list_cols.append(cols)
points = vtk.vtkPoints()
triangles = vtk.vtkCellArray()
if colormap is not None:
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
colors.SetName("Colors")
for k in xrange(len(list_sq)):
xyz = list_sq[k]
if colormap is not None:
cols = list_cols[k]
for i in xrange(xyz.shape[0]):
points.InsertNextPoint(*xyz[i])
if colormap is not None:
colors.InsertNextTuple3(*cols[i])
for j in xrange(faces.shape[0]):
triangle = vtk.vtkTriangle()
triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0])
triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0])
triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0])
triangles.InsertNextCell(triangle)
del triangle
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetPolys(triangles)
if colormap is not None:
polydata.GetPointData().SetScalars(colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def peaks(peaks_dirs, peaks_values=None, scale=2.2, colors=(1, 0, 0)):
""" Visualize peak directions as given from ``peaks_from_model``
Parameters
----------
peaks_dirs : ndarray
Peak directions. The shape of the array can be (M, 3) or (X, M, 3) or
(X, Y, M, 3) or (X, Y, Z, M, 3)
peaks_values : ndarray
Peak values. The shape of the array can be (M, ) or (X, M) or
(X, Y, M) or (X, Y, Z, M)
scale : float
Distance between spheres
colors : ndarray or tuple
Peak colors
Returns
-------
vtkActor
See Also
--------
dipy.viz.fvtk.sphere_funcs
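    Examples
    --------
    A minimal sketch with a single peak per voxel (illustrative only):
    >>> from dipy.viz import fvtk
    >>> dirs = np.array([[1., 0., 0.]])
    >>> r = fvtk.ren()
    >>> fvtk.add(r, fvtk.peaks(dirs))
    >>> #fvtk.show(r)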
"""
peaks_dirs = np.asarray(peaks_dirs)
if peaks_dirs.ndim > 5:
raise ValueError("Wrong shape")
peaks_dirs = _makeNd(peaks_dirs, 5)
if peaks_values is not None:
peaks_values = _makeNd(peaks_values, 4)
grid_shape = np.array(peaks_dirs.shape[:3])
list_dirs = []
for ijk in np.ndindex(*grid_shape):
xyz = scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
for i in range(peaks_dirs.shape[-2]):
if peaks_values is not None:
pv = peaks_values[ijk][i]
else:
pv = 1.
symm = np.vstack((-peaks_dirs[ijk][i] * pv + xyz,
peaks_dirs[ijk][i] * pv + xyz))
list_dirs.append(symm)
return line(list_dirs, colors)
def tensor(evals, evecs, scalar_colors=None, sphere=None, scale=2.2, norm=True):
"""Plot many tensors as ellipsoids simultaneously.
Parameters
----------
evals : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray
eigenvalues
evecs : (3, 3) or (X, 3, 3) or (X, Y, 3, 3) or (X, Y, Z, 3, 3) ndarray
eigenvectors
scalar_colors : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray
RGB colors used to show the tensors
Default None, color the ellipsoids using ``color_fa``
sphere : Sphere,
this sphere will be transformed to the tensor ellipsoid
Default is None which uses a symmetric sphere with 724 points.
scale : float,
distance between ellipsoids.
norm : boolean,
Normalize `evals`.
Returns
-------
actor : vtkActor
Ellipsoids
Examples
--------
>>> from dipy.viz import fvtk
>>> r = fvtk.ren()
>>> evals = np.array([1.4, .35, .35]) * 10 ** (-3)
>>> evecs = np.eye(3)
>>> from dipy.data import get_sphere
>>> sphere = get_sphere('symmetric724')
>>> fvtk.add(r, fvtk.tensor(evals, evecs, sphere=sphere))
>>> #fvtk.show(r)
"""
evals = np.asarray(evals)
if evals.ndim > 4:
raise ValueError("Wrong shape")
evals = _makeNd(evals, 4)
evecs = _makeNd(evecs, 5)
grid_shape = np.array(evals.shape[:3])
if sphere is None:
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
faces = np.asarray(sphere.faces, dtype=int)
vertices = sphere.vertices
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
colors.SetName("Colors")
if scalar_colors is None:
from dipy.reconst.dti import color_fa, fractional_anisotropy
cfa = color_fa(fractional_anisotropy(evals), evecs)
else:
cfa = _makeNd(scalar_colors, 4)
list_sq = []
list_cols = []
for ijk in ndindex(grid_shape):
ea = evals[ijk]
if norm:
ea /= ea.max()
ea = np.diag(ea.copy())
ev = evecs[ijk].copy()
xyz = np.dot(ev, np.dot(ea, vertices.T))
xyz += scale * (ijk - grid_shape / 2.)[:, None]
xyz = xyz.T
list_sq.append(xyz)
acolor = np.zeros(xyz.shape)
acolor[:, :] = np.interp(cfa[ijk], [0, 1], [0, 255])
list_cols.append(acolor.astype('ubyte'))
points = vtk.vtkPoints()
triangles = vtk.vtkCellArray()
for k in xrange(len(list_sq)):
xyz = list_sq[k]
cols = list_cols[k]
for i in xrange(xyz.shape[0]):
points.InsertNextPoint(*xyz[i])
colors.InsertNextTuple3(*cols[i])
for j in xrange(faces.shape[0]):
triangle = vtk.vtkTriangle()
triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0])
triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0])
triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0])
triangles.InsertNextCell(triangle)
del triangle
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetPolys(triangles)
polydata.GetPointData().SetScalars(colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if major_version <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
return actor
def slicer(vol, voxsz=(1.0, 1.0, 1.0), plane_i=[0], plane_j=None,
plane_k=None, outline=True):
""" Slice a 3D volume
Parameters
----------
vol : array, shape (N, M, K)
An array representing the volumetric dataset that we want to slice
voxsz : sequence of 3 floats
Voxel size.
plane_i : sequence of ints
show plane or planes along the first dimension
plane_j : sequence of ints
show plane or planes along the second dimension
plane_k : sequence of ints
show plane or planes along the third(last) dimension
outline : bool
if True (default) a small outline is drawn around the slices
Examples
--------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> x, y, z = np.ogrid[-10:10:80j, -10:10:80j, -10:10:80j]
>>> s = np.sin(x * y * z) / (x * y * z)
>>> r = fvtk.ren()
>>> fvtk.add(r, fvtk.slicer(s, plane_i=[0, 5]))
>>> #fvtk.show(r)
"""
if plane_i is None:
plane_i = []
if plane_j is None:
plane_j = []
if plane_k is None:
plane_k = []
if vol.ndim != 3:
raise ValueError("vol has to be a 3d array")
vol = np.interp(vol, xp=[vol.min(), vol.max()], fp=[0, 255])
vol = vol.astype('uint8')
im = vtk.vtkImageData()
if major_version <= 5:
im.SetScalarTypeToUnsignedChar()
I, J, K = vol.shape[:3]
im.SetDimensions(I, J, K)
# im.SetOrigin(0,0,0)
im.SetSpacing(voxsz[2], voxsz[0], voxsz[1])
if major_version <= 5:
im.AllocateScalars()
else:
im.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 3)
# copy data
for i in range(vol.shape[0]):
for j in range(vol.shape[1]):
for k in range(vol.shape[2]):
im.SetScalarComponentFromFloat(i, j, k, 0, vol[i, j, k])
# An outline provides context around the data.
outlineData = vtk.vtkOutlineFilter()
if major_version <= 5:
outlineData.SetInput(im)
else:
outlineData.SetInputData(im)
mapOutline = vtk.vtkPolyDataMapper()
mapOutline.SetInputConnection(outlineData.GetOutputPort())
outline_ = vtk.vtkActor()
outline_.SetMapper(mapOutline)
outline_.GetProperty().SetColor(1, 0, 0)
# Now we are creating three orthogonal planes passing through the
# volume. Each plane uses a different texture map and therefore has
    # different coloration.
    # Start by creating a black/white lookup table.
lut = vtk.vtkLookupTable()
lut.SetTableRange(vol.min(), vol.max())
lut.SetSaturationRange(0, 0)
lut.SetHueRange(0, 0)
lut.SetValueRange(0, 1)
lut.SetRampToLinear()
lut.Build()
x1, x2, y1, y2, z1, z2 = im.GetExtent()
# print x1,x2,y1,y2,z1,z2
# Create the first of the three planes. The filter vtkImageMapToColors
# maps the data through the corresponding lookup table created above.
# The vtkImageActor is a type of vtkProp and conveniently displays an
# image on a single quadrilateral plane. It does this using texture
# mapping and as a result is quite fast. (Note: the input image has to
# be unsigned char values, which the vtkImageMapToColors produces.)
# Note also that by specifying the DisplayExtent, the pipeline
# requests data of this extent and the vtkImageMapToColors only
# processes a slice of data.
planeColors = vtk.vtkImageMapToColors()
# saggitalColors.SetInputConnection(im.GetOutputPort())
if major_version <= 5:
planeColors.SetInput(im)
else:
planeColors.SetInputData(im)
planeColors.SetLookupTable(lut)
planeColors.Update()
saggitals = []
for x in plane_i:
saggital = vtk.vtkImageActor()
if major_version <= 5:
saggital.SetInput(planeColors.GetOutput())
else:
saggital.SetInputData(planeColors.GetOutput())
saggital.SetDisplayExtent(x, x, y1, y2, z1, z2)
saggitals.append(saggital)
axials = []
for z in plane_k:
axial = vtk.vtkImageActor()
if major_version <= 5:
axial.SetInput(planeColors.GetOutput())
else:
axial.SetInputData(planeColors.GetOutput())
axial.SetDisplayExtent(x1, x2, y1, y2, z, z)
axials.append(axial)
coronals = []
for y in plane_j:
coronal = vtk.vtkImageActor()
if major_version <= 5:
coronal.SetInput(planeColors.GetOutput())
else:
coronal.SetInputData(planeColors.GetOutput())
coronal.SetDisplayExtent(x1, x2, y, y, z1, z2)
coronals.append(coronal)
assem = vtk.vtkAssembly()
for sag in saggitals:
assem.AddPart(sag)
for ax in axials:
assem.AddPart(ax)
for cor in coronals:
assem.AddPart(cor)
if outline:
assem.AddPart(outline_)
return assem
def camera(ren, pos=None, focal=None, viewup=None, verbose=True):
""" Change the active camera
Parameters
----------
ren : vtkRenderer
pos : tuple
(x, y, z) position of the camera
focal : tuple
(x, y, z) focal point
viewup : tuple
(x, y, z) viewup vector
verbose : bool
show information about the camera
Returns
-------
vtkCamera
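    Examples
    --------
    An illustrative sketch; with ``verbose=False`` nothing is printed:
    >>> from dipy.viz import fvtk
    >>> r = fvtk.ren()
    >>> cam = fvtk.camera(r, pos=(0, 0, 10), verbose=False)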
"""
cam = ren.GetActiveCamera()
if verbose:
print('Camera Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
print('Camera Focal Point (%.2f,%.2f,%.2f)' % cam.GetFocalPoint())
print('Camera View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
if pos is not None:
cam = ren.GetActiveCamera().SetPosition(*pos)
if focal is not None:
ren.GetActiveCamera().SetFocalPoint(*focal)
if viewup is not None:
ren.GetActiveCamera().SetViewUp(*viewup)
cam = ren.GetActiveCamera()
if pos is not None or focal is not None or viewup is not None:
if verbose:
print('-------------------------------------')
print('Camera New Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
print('Camera New Focal Point (%.2f,%.2f,%.2f)' %
cam.GetFocalPoint())
print('Camera New View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
return cam
def show(ren, title='Dipy', size=(300, 300), png_magnify=1):
""" Show window
Notes
-----
    To save a screenshot press 's' and check your current directory
for ``fvtk.png``.
Parameters
------------
ren : vtkRenderer() object
As returned from function ``ren()``.
title : string
A string for the window title bar.
size : (int, int)
``(width, height)`` of the window
png_magnify : int
Number of times to magnify the screenshot.
Notes
-----
If you want to:
    * navigate in the 3d world using the left, middle and right mouse buttons
* reset the screen press 'r'
* save a screenshot press 's'
* quit press 'q'
See also
---------
dipy.viz.fvtk.record
Examples
----------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3),np.random.rand(20,3)]
>>> colors=np.array([[0.2,0.2,0.2],[0.8,0.8,0.8]])
>>> c=fvtk.line(lines,colors)
>>> fvtk.add(r,c)
>>> l=fvtk.label(r)
>>> fvtk.add(r,l)
>>> #fvtk.show(r)
"""
ren.ResetCamera()
window = vtk.vtkRenderWindow()
window.AddRenderer(ren)
# window.SetAAFrames(6)
window.SetWindowName(title)
window.SetSize(size[0], size[1])
style = vtk.vtkInteractorStyleTrackballCamera()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(window)
iren.SetPicker(picker)
def key_press(obj, event):
key = obj.GetKeySym()
if key == 's' or key == 'S':
print('Saving image...')
renderLarge = vtk.vtkRenderLargeImage()
if major_version <= 5:
renderLarge.SetInput(ren)
else:
renderLarge.SetInputData(ren)
renderLarge.SetMagnification(png_magnify)
renderLarge.Update()
writer = vtk.vtkPNGWriter()
writer.SetInputConnection(renderLarge.GetOutputPort())
writer.SetFileName('fvtk.png')
writer.Write()
print('Look for fvtk.png in your current working directory.')
iren.AddObserver('KeyPressEvent', key_press)
iren.SetInteractorStyle(style)
iren.Initialize()
picker.Pick(85, 126, 0, ren)
window.Render()
iren.Start()
# window.RemoveAllObservers()
# ren.SetRenderWindow(None)
window.RemoveRenderer(ren)
ren.SetRenderWindow(None)
def record(ren=None, cam_pos=None, cam_focal=None, cam_view=None,
out_path=None, path_numbering=False, n_frames=1, az_ang=10,
magnification=1, size=(300, 300), verbose=False):
''' This will record a video of your scene
Records a video as a series of ``.png`` files of your scene by rotating
    the azimuth angle az_ang in every frame.
Parameters
-----------
ren : vtkRenderer() object
As returned from :func:`ren`.
cam_pos : None or sequence (3,), optional
Camera position.
cam_focal : None or sequence (3,), optional
Camera focal point.
cam_view : None or sequence (3,), optional
Camera view up.
out_path : str, optional
        Output path (or path prefix when frames are numbered) for the frames
path_numbering : bool, optional
        When recording, the frame number is appended to `out_path`. If
        `n_frames` is larger than 1, this defaults to True.
n_frames : int, optional
number of frames to save. Default: 1
az_ang : float, optional
Azimuthal angle of camera rotation (degrees). Default: 10.
magnification : int, optional
How much to magnify the saved frame. Default: 1 (no magnification).
Examples
---------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> a=fvtk.axes()
>>> fvtk.add(r,a)
>>> #uncomment below to record
>>> #fvtk.record(r)
>>> #check for new images in current directory
'''
if ren is None:
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(size[0], size[1])
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# ren.GetActiveCamera().Azimuth(180)
ren.ResetCamera()
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetInput(ren)
renderLarge.SetMagnification(magnification)
renderLarge.Update()
writer = vtk.vtkPNGWriter()
ang = 0
if cam_pos is not None:
cx, cy, cz = cam_pos
ren.GetActiveCamera().SetPosition(cx, cy, cz)
if cam_focal is not None:
fx, fy, fz = cam_focal
ren.GetActiveCamera().SetFocalPoint(fx, fy, fz)
if cam_view is not None:
ux, uy, uz = cam_view
ren.GetActiveCamera().SetViewUp(ux, uy, uz)
cam = ren.GetActiveCamera()
if verbose:
print('Camera Position (%.2f,%.2f,%.2f)' % cam.GetPosition())
print('Camera Focal Point (%.2f,%.2f,%.2f)' % cam.GetFocalPoint())
print('Camera View Up (%.2f,%.2f,%.2f)' % cam.GetViewUp())
for i in range(n_frames):
ren.GetActiveCamera().Azimuth(ang)
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetInput(ren)
renderLarge.SetMagnification(magnification)
renderLarge.Update()
writer.SetInputConnection(renderLarge.GetOutputPort())
# filename='/tmp/'+str(3000000+i)+'.png'
if n_frames > 1 or path_numbering:
if out_path is None:
filename = str(1000000 + i) + '.png'
else:
filename = out_path + str(1000000 + i) + '.png'
else:
filename = out_path
writer.SetFileName(filename)
writer.Write()
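        # Azimuth() rotates the camera relative to its current position, so a
        # constant az_ang per frame yields a steady rotation (frame 0 stays put).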
ang = +az_ang
if __name__ == "__main__":
pass
|
smarkwell/asuswrt-merlin
|
refs/heads/master
|
release/src-rt-6.x.4708/linux/linux-2.6.36/tools/perf/scripts/python/sched-migration.py
|
185
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf trace event handlers have been generated by perf trace -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
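#
# Usage sketch (assumed invocation; the exact commands depend on the perf
# version shipped with this kernel tree):
#
#   perf record -a -e sched:sched_switch -e sched:sched_migrate_task \
#       -e sched:sched_wakeup -e sched:sched_wakeup_new sleep 10
#   perf trace -s sched-migration.py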
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
		ret += " last event: %s" % self.event.__repr__()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
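		# RunqueueSnapshot.sched_switch() returns the same snapshot object
		# when nothing changed, so an identity check detects the no-op case.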
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
	def __init__(self, arg=None):
		# Avoid sharing a mutable default list between instances.
		self.data = arg if arg is not None else []
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
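		# Binary search over the ordered, non-overlapping slices: returns the
		# index of the slice containing ts, or -1 if no slice contains it.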
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
qedi-r/home-assistant
|
refs/heads/dev
|
homeassistant/components/lock/device_action.py
|
2
|
"""Provides device automations for Lock."""
from typing import Optional, List
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_DOMAIN,
CONF_TYPE,
CONF_DEVICE_ID,
CONF_ENTITY_ID,
SERVICE_LOCK,
SERVICE_OPEN,
SERVICE_UNLOCK,
)
from homeassistant.core import HomeAssistant, Context
from homeassistant.helpers import entity_registry
import homeassistant.helpers.config_validation as cv
from . import DOMAIN, SUPPORT_OPEN
ACTION_TYPES = {"lock", "unlock", "open"}
ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(ACTION_TYPES),
vol.Required(CONF_ENTITY_ID): cv.entity_domain(DOMAIN),
}
)
async def async_get_actions(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device actions for Lock devices."""
registry = await entity_registry.async_get_registry(hass)
actions = []
    # Get all of the integration's entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
# Add actions for each entity that belongs to this integration
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "lock",
}
)
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "unlock",
}
)
state = hass.states.get(entry.entity_id)
if state:
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if features & (SUPPORT_OPEN):
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "open",
}
)
return actions
async def async_call_action_from_config(
hass: HomeAssistant, config: dict, variables: dict, context: Optional[Context]
) -> None:
"""Execute a device action."""
config = ACTION_SCHEMA(config)
service_data = {ATTR_ENTITY_ID: config[CONF_ENTITY_ID]}
if config[CONF_TYPE] == "lock":
service = SERVICE_LOCK
elif config[CONF_TYPE] == "unlock":
service = SERVICE_UNLOCK
elif config[CONF_TYPE] == "open":
service = SERVICE_OPEN
await hass.services.async_call(
DOMAIN, service, service_data, blocking=True, context=context
)
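# Illustrative call from an async context (a sketch; the device and entity ids
# are made-up values, not part of this module):
#
#     await async_call_action_from_config(
#         hass,
#         {CONF_DEVICE_ID: "abc123", CONF_DOMAIN: DOMAIN,
#          CONF_ENTITY_ID: "lock.front_door", CONF_TYPE: "open"},
#         {},
#         None,
#     )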
|
Affix/CouchPotatoServer
|
refs/heads/master
|
libs/xmpp/transports.py
|
89
|
## transports.py
##
## Copyright (C) 2003-2004 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: transports.py,v 1.35 2009/04/07 08:34:09 snakeru Exp $
"""
This module contains the low-level implementations of xmpppy connect methods or
(in other words) transports for xmpp-stanzas.
Currently there are three transports:
direct TCP connect - TCPsocket class
proxied TCP connect - HTTPPROXYsocket class (CONNECT proxies)
TLS connection - TLS class. Can be used for SSL connections also.
Transports are stackable, so e.g. TLS can use HTTPPROXYsocket or TCPsocket as a lower-level transport.
The 'error' exception is also defined to allow capture of this module's specific exceptions.
"""
import socket, select, base64, dispatcher, sys
from simplexml import ustr
from client import PlugIn
from protocol import *
# determine which DNS resolution library is available
HAVE_DNSPYTHON = False
HAVE_PYDNS = False
try:
import dns.resolver # http://dnspython.org/
HAVE_DNSPYTHON = True
except ImportError:
try:
import DNS # http://pydns.sf.net/
HAVE_PYDNS = True
except ImportError:
pass
DATA_RECEIVED = 'DATA RECEIVED'
DATA_SENT = 'DATA SENT'
class error:
"""An exception to be raised in case of low-level errors in methods of 'transports' module."""
def __init__(self, comment):
"""Cache the descriptive string"""
self._comment = comment
def __str__(self):
"""Serialise exception into pre-cached descriptive string."""
return self._comment
BUFLEN = 1024
class TCPsocket(PlugIn):
""" This class defines direct TCP connection method. """
def __init__(self, server = None, use_srv = True):
""" Cache connection point 'server'. 'server' is the tuple of (host, port)
absolutely the same as standard tcp socket uses. However library will lookup for
('_xmpp-client._tcp.' + host) SRV record in DNS and connect to the found (if it is)
server instead
"""
PlugIn.__init__(self)
self.DBG_LINE = 'socket'
self._exported_methods = [self.send, self.disconnect]
self._server, self.use_srv = server, use_srv
def srv_lookup(self, server):
" SRV resolver. Takes server=(host, port) as argument. Returns new (host, port) pair "
if HAVE_DNSPYTHON or HAVE_PYDNS:
host, port = server
possible_queries = ['_xmpp-client._tcp.' + host]
for query in possible_queries:
try:
if HAVE_DNSPYTHON:
answers = [x for x in dns.resolver.query(query, 'SRV')]
if answers:
host = str(answers[0].target)
port = int(answers[0].port)
break
elif HAVE_PYDNS:
# ensure we haven't cached an old configuration
DNS.DiscoverNameServers()
response = DNS.Request().req(query, qtype = 'SRV')
answers = response.answers
if len(answers) > 0:
# ignore the priority and weight for now
_, _, port, host = answers[0]['data']
del _
port = int(port)
break
except:
self.DEBUG('An error occurred while looking up %s' % query, 'warn')
server = (host, port)
else:
self.DEBUG("Could not load one of the supported DNS libraries (dnspython or pydns). SRV records will not be queried and you may need to set custom hostname/port for some servers to be accessible.\n", 'warn')
# end of SRV resolver
return server
def plugin(self, owner):
""" Fire up connection. Return non-empty string on success.
Also registers self.disconnected method in the owner's dispatcher.
Called internally. """
if not self._server: self._server = (self._owner.Server, 5222)
if self.use_srv: server = self.srv_lookup(self._server)
else: server = self._server
if not self.connect(server): return
self._owner.Connection = self
self._owner.RegisterDisconnectHandler(self.disconnected)
return 'ok'
def getHost(self):
""" Return the 'host' value that is connection is [will be] made to."""
return self._server[0]
def getPort(self):
""" Return the 'port' value that is connection is [will be] made to."""
return self._server[1]
def connect(self, server = None):
""" Try to connect to the given host/port. Does not lookup for SRV record.
Returns non-empty string on success. """
try:
if not server: server = self._server
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((server[0], int(server[1])))
self._send = self._sock.sendall
self._recv = self._sock.recv
self.DEBUG("Successfully connected to remote host %s" % `server`, 'start')
return 'ok'
except socket.error, (errno, strerror):
self.DEBUG("Failed to connect to remote host %s: %s (%s)" % (`server`, strerror, errno), 'error')
except: pass
def plugout(self):
""" Disconnect from the remote server and unregister self.disconnected method from
the owner's dispatcher. """
self._sock.close()
if self._owner.__dict__.has_key('Connection'):
del self._owner.Connection
self._owner.UnregisterDisconnectHandler(self.disconnected)
def receive(self):
""" Reads all pending incoming data.
In case of disconnection calls owner's disconnected() method and then raises IOError exception."""
try: received = self._recv(BUFLEN)
except socket.sslerror, e:
self._seen_data = 0
if e[0] == socket.SSL_ERROR_WANT_READ: return ''
if e[0] == socket.SSL_ERROR_WANT_WRITE: return ''
self.DEBUG('Socket error while receiving data', 'error')
sys.exc_clear()
self._owner.disconnected()
raise IOError("Disconnected from server")
except: received = ''
while self.pending_data(0):
try: add = self._recv(BUFLEN)
except: add = ''
received += add
if not add: break
if len(received): # length of 0 means disconnect
self._seen_data = 1
self.DEBUG(received, 'got')
if hasattr(self._owner, 'Dispatcher'):
self._owner.Dispatcher.Event('', DATA_RECEIVED, received)
else:
self.DEBUG('Socket error while receiving data', 'error')
self._owner.disconnected()
raise IOError("Disconnected from server")
return received
def send(self, raw_data):
""" Writes raw outgoing data. Blocks until done.
If supplied data is unicode string, encodes it to utf-8 before send."""
if type(raw_data) == type(u''): raw_data = raw_data.encode('utf-8')
elif type(raw_data) <> type(''): raw_data = ustr(raw_data).encode('utf-8')
try:
self._send(raw_data)
# Avoid printing messages that are empty keepalive packets.
if raw_data.strip():
self.DEBUG(raw_data, 'sent')
if hasattr(self._owner, 'Dispatcher'): # HTTPPROXYsocket will send data before we have a Dispatcher
self._owner.Dispatcher.Event('', DATA_SENT, raw_data)
except:
self.DEBUG("Socket error while sending data", 'error')
self._owner.disconnected()
def pending_data(self, timeout = 0):
""" Returns true if there is a data ready to be read. """
return select.select([self._sock], [], [], timeout)[0]
def disconnect(self):
""" Closes the socket. """
self.DEBUG("Closing socket", 'stop')
self._sock.close()
def disconnected(self):
""" Called when a Network Error or disconnection occurs.
Designed to be overidden. """
self.DEBUG("Socket operation failed", 'error')
DBG_CONNECT_PROXY = 'CONNECTproxy'
class HTTPPROXYsocket(TCPsocket):
""" HTTP (CONNECT) proxy connection class. Uses TCPsocket as the base class
redefines only connect method. Allows to use HTTP proxies like squid with
(optionally) simple authentication (using login and password). """
def __init__(self, proxy, server, use_srv = True):
""" Caches proxy and target addresses.
'proxy' argument is a dictionary with mandatory keys 'host' and 'port' (proxy address)
and optional keys 'user' and 'password' to use for authentication.
'server' argument is a tuple of host and port - just like TCPsocket uses. """
TCPsocket.__init__(self, server, use_srv)
self.DBG_LINE = DBG_CONNECT_PROXY
self._proxy = proxy
def plugin(self, owner):
""" Starts connection. Used interally. Returns non-empty string on success."""
owner.debug_flags.append(DBG_CONNECT_PROXY)
return TCPsocket.plugin(self, owner)
def connect(self, dupe = None):
""" Starts connection. Connects to proxy, supplies login and password to it
(if were specified while creating instance). Instructs proxy to make
connection to the target server. Returns non-empty sting on success. """
if not TCPsocket.connect(self, (self._proxy['host'], self._proxy['port'])): return
self.DEBUG("Proxy server contacted, performing authentification", 'start')
connector = ['CONNECT %s:%s HTTP/1.0' % self._server,
'Proxy-Connection: Keep-Alive',
'Pragma: no-cache',
'Host: %s:%s' % self._server,
'User-Agent: HTTPPROXYsocket/v0.1']
if self._proxy.has_key('user') and self._proxy.has_key('password'):
credentials = '%s:%s' % (self._proxy['user'], self._proxy['password'])
credentials = base64.encodestring(credentials).strip()
connector.append('Proxy-Authorization: Basic ' + credentials)
connector.append('\r\n')
self.send('\r\n'.join(connector))
try: reply = self.receive().replace('\r', '')
except IOError:
self.DEBUG('Proxy suddenly disconnected', 'error')
self._owner.disconnected()
return
try: proto, code, desc = reply.split('\n')[0].split(' ', 2)
except: raise error('Invalid proxy reply')
if code <> '200':
self.DEBUG('Invalid proxy reply: %s %s %s' % (proto, code, desc), 'error')
self._owner.disconnected()
return
while reply.find('\n\n') == -1:
try: reply += self.receive().replace('\r', '')
except IOError:
self.DEBUG('Proxy suddenly disconnected', 'error')
self._owner.disconnected()
return
self.DEBUG("Authentification successfull. Jabber server contacted.", 'ok')
return 'ok'
def DEBUG(self, text, severity):
"""Overwrites DEBUG tag to allow debug output be presented as "CONNECTproxy"."""
return self._owner.DEBUG(DBG_CONNECT_PROXY, text, severity)
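# For illustration, with self._server == ('jabber.example.com', 5222) (a
# made-up address), the connector list built in connect() above produces this
# CONNECT request on the wire:
#
#     CONNECT jabber.example.com:5222 HTTP/1.0
#     Proxy-Connection: Keep-Alive
#     Pragma: no-cache
#     Host: jabber.example.com:5222
#     User-Agent: HTTPPROXYsocket/v0.1
#
# plus a Proxy-Authorization header when 'user' and 'password' are given.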
class TLS(PlugIn):
""" TLS connection used to encrypts already estabilished tcp connection."""
def PlugIn(self, owner, now = 0):
""" If the 'now' argument is true then starts using encryption immidiatedly.
If 'now' in false then starts encryption as soon as TLS feature is
declared by the server (if it were already declared - it is ok).
"""
if owner.__dict__.has_key('TLS'): return # Already enabled.
PlugIn.PlugIn(self, owner)
DBG_LINE = 'TLS'
if now: return self._startSSL()
if self._owner.Dispatcher.Stream.features:
try: self.FeaturesHandler(self._owner.Dispatcher, self._owner.Dispatcher.Stream.features)
except NodeProcessed: pass
else: self._owner.RegisterHandlerOnce('features', self.FeaturesHandler, xmlns = NS_STREAMS)
self.starttls = None
def plugout(self, now = 0):
""" Unregisters TLS handler's from owner's dispatcher. Take note that encription
can not be stopped once started. You can only break the connection and start over."""
self._owner.UnregisterHandler('features', self.FeaturesHandler, xmlns = NS_STREAMS)
self._owner.UnregisterHandler('proceed', self.StartTLSHandler, xmlns = NS_TLS)
self._owner.UnregisterHandler('failure', self.StartTLSHandler, xmlns = NS_TLS)
def FeaturesHandler(self, conn, feats):
""" Used to analyse server <features/> tag for TLS support.
If TLS is supported starts the encryption negotiation. Used internally"""
if not feats.getTag('starttls', namespace = NS_TLS):
self.DEBUG("TLS unsupported by remote server.", 'warn')
return
self.DEBUG("TLS supported by remote server. Requesting TLS start.", 'ok')
self._owner.RegisterHandlerOnce('proceed', self.StartTLSHandler, xmlns = NS_TLS)
self._owner.RegisterHandlerOnce('failure', self.StartTLSHandler, xmlns = NS_TLS)
self._owner.Connection.send('<starttls xmlns="%s"/>' % NS_TLS)
raise NodeProcessed
def pending_data(self, timeout = 0):
""" Returns true if there possible is a data ready to be read. """
return self._tcpsock._seen_data or select.select([self._tcpsock._sock], [], [], timeout)[0]
def _startSSL(self):
""" Immidiatedly switch socket to TLS mode. Used internally."""
""" Here we should switch pending_data to hint mode."""
tcpsock = self._owner.Connection
tcpsock._sslObj = socket.ssl(tcpsock._sock, None, None)
tcpsock._sslIssuer = tcpsock._sslObj.issuer()
tcpsock._sslServer = tcpsock._sslObj.server()
tcpsock._recv = tcpsock._sslObj.read
tcpsock._send = tcpsock._sslObj.write
tcpsock._seen_data = 1
self._tcpsock = tcpsock
tcpsock.pending_data = self.pending_data
tcpsock._sock.setblocking(0)
self.starttls = 'success'
def StartTLSHandler(self, conn, starttls):
""" Handle server reply if TLS is allowed to process. Behaves accordingly.
Used internally."""
if starttls.getNamespace() <> NS_TLS: return
self.starttls = starttls.getName()
if self.starttls == 'failure':
self.DEBUG("Got starttls response: " + self.starttls, 'error')
return
self.DEBUG("Got starttls proceed response. Switching to TLS/SSL...", 'ok')
self._startSSL()
self._owner.Dispatcher.PlugOut()
dispatcher.Dispatcher().PlugIn(self._owner)
|
dtran320/django-storages
|
refs/heads/master
|
storages/backends/s3boto.py
|
1
|
import os
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.utils.encoding import force_unicode, smart_str
try:
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
from boto.s3.key import Key
except ImportError:
raise ImproperlyConfigured, "Could not load Boto's S3 bindings.\
\nSee http://code.google.com/p/boto/"
ACCESS_KEY_NAME = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
SECRET_KEY_NAME = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
HEADERS = getattr(settings, 'AWS_HEADERS', {})
STORAGE_BUCKET_NAME = getattr(settings, 'AWS_STORAGE_BUCKET_NAME', None)
AUTO_CREATE_BUCKET = getattr(settings, 'AWS_AUTO_CREATE_BUCKET', True)
DEFAULT_ACL = getattr(settings, 'AWS_DEFAULT_ACL', 'public-read')
BUCKET_ACL = getattr(settings, 'AWS_BUCKET_ACL', DEFAULT_ACL)
QUERYSTRING_AUTH = getattr(settings, 'AWS_QUERYSTRING_AUTH', True)
QUERYSTRING_EXPIRE = getattr(settings, 'AWS_QUERYSTRING_EXPIRE', 3600)
REDUCED_REDUNDANCY = getattr(settings, 'AWS_REDUCED_REDUNDANCY', False)
LOCATION = getattr(settings, 'AWS_LOCATION', '')
CUSTOM_DOMAIN = getattr(settings, 'AWS_S3_CUSTOM_DOMAIN', None)
SECURE_URLS = getattr(settings, 'AWS_S3_SECURE_URLS', True)
FILE_NAME_CHARSET = getattr(settings, 'AWS_S3_FILE_NAME_CHARSET', 'utf-8')
FILE_OVERWRITE = getattr(settings, 'AWS_S3_FILE_OVERWRITE', True)
IS_GZIPPED = getattr(settings, 'AWS_IS_GZIPPED', False)
GZIP_CONTENT_TYPES = getattr(settings, 'GZIP_CONTENT_TYPES', (
'text/css',
'application/javascript',
'application/x-javascript'
))
if IS_GZIPPED:
from gzip import GzipFile
def safe_join(base, *paths):
"""
A version of django.utils._os.safe_join for S3 paths.
Joins one or more path components to the base path component intelligently.
Returns a normalized version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
Paths outside the base path indicate a possible security sensitive operation.
"""
from urlparse import urljoin
base_path = force_unicode(base)
paths = map(lambda p: force_unicode(p), paths)
final_path = urljoin(base_path + ("/" if not base_path.endswith("/") else ""), *paths)
# Ensure final_path starts with base_path and that the next character after
# the final path is '/' (or nothing, in which case final_path must be
# equal to base_path).
base_path_len = len(base_path)
if not final_path.startswith(base_path) \
or final_path[base_path_len:base_path_len+1] not in ('', '/'):
raise ValueError('the joined path is located outside of the base path'
' component')
return final_path
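# Illustrative behaviour of safe_join (a sketch, not doctests shipped with
# this module):
#
#     safe_join(u"media", u"photos/2011")   # -> u"media/photos/2011"
#     safe_join(u"media", u"../etc")        # raises ValueError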
class S3BotoStorage(Storage):
"""Amazon Simple Storage Service using Boto"""
def __init__(self, bucket=STORAGE_BUCKET_NAME, access_key=None,
secret_key=None, bucket_acl=BUCKET_ACL, acl=DEFAULT_ACL, headers=HEADERS,
gzip=IS_GZIPPED, gzip_content_types=GZIP_CONTENT_TYPES,
querystring_auth=QUERYSTRING_AUTH, querystring_expire=QUERYSTRING_EXPIRE,
reduced_redundancy=REDUCED_REDUNDANCY,
custom_domain=CUSTOM_DOMAIN, secure_urls=SECURE_URLS,
location=LOCATION, file_name_charset=FILE_NAME_CHARSET):
self.bucket_acl = bucket_acl
self.bucket_name = bucket
self.acl = acl
self.headers = headers
self.gzip = gzip
self.gzip_content_types = gzip_content_types
self.querystring_auth = querystring_auth
self.querystring_expire = querystring_expire
self.reduced_redundancy = reduced_redundancy
self.custom_domain = custom_domain
self.secure_urls = secure_urls
self.location = location or ''
self.location = self.location.lstrip('/')
self.file_name_charset = file_name_charset
if not access_key and not secret_key:
access_key, secret_key = self._get_access_keys()
self.connection = S3Connection(access_key, secret_key)
@property
def bucket(self):
if not hasattr(self, '_bucket'):
self._bucket = self._get_or_create_bucket(self.bucket_name)
return self._bucket
def _get_access_keys(self):
access_key = ACCESS_KEY_NAME
secret_key = SECRET_KEY_NAME
if (access_key or secret_key) and (not access_key or not secret_key):
access_key = os.environ.get(ACCESS_KEY_NAME)
secret_key = os.environ.get(SECRET_KEY_NAME)
if access_key and secret_key:
# Both were provided, so use them
return access_key, secret_key
return None, None
def _get_or_create_bucket(self, name):
"""Retrieves a bucket if it exists, otherwise creates it."""
try:
return self.connection.get_bucket(name)
except S3ResponseError, e:
if AUTO_CREATE_BUCKET:
bucket = self.connection.create_bucket(name)
bucket.set_acl(self.bucket_acl)
return bucket
raise ImproperlyConfigured, ("Bucket specified by "
"AWS_STORAGE_BUCKET_NAME does not exist. Buckets can be "
"automatically created by setting AWS_AUTO_CREATE_BUCKET=True")
def _clean_name(self, name):
        # Useful for Windows paths
return os.path.normpath(name).replace('\\', '/')
def _normalize_name(self, name):
try:
return safe_join(self.location, name).lstrip('/')
except ValueError:
raise SuspiciousOperation("Attempted access to '%s' denied." % name)
def _encode_name(self, name):
return smart_str(name, encoding=self.file_name_charset)
def _compress_content(self, content):
"""Gzip a given string."""
zbuf = StringIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(content.read())
zfile.close()
content.file = zbuf
return content
def _open(self, name, mode='rb'):
name = self._normalize_name(self._clean_name(name))
f = S3BotoStorageFile(name, mode, self)
if not f.key:
raise IOError('File does not exist: %s' % name)
return f
def _save(self, name, content):
cleaned_name = self._clean_name(name)
name = self._normalize_name(cleaned_name)
headers = self.headers
content_type = getattr(content,'content_type', mimetypes.guess_type(name)[0] or Key.DefaultContentType)
if self.gzip and content_type in self.gzip_content_types:
content = self._compress_content(content)
headers.update({'Content-Encoding': 'gzip'})
content.name = cleaned_name
k = self.bucket.get_key(self._encode_name(name))
if not k:
k = self.bucket.new_key(self._encode_name(name))
k.set_metadata('Content-Type',content_type)
k.set_contents_from_file(content, headers=headers, policy=self.acl,
reduced_redundancy=self.reduced_redundancy)
return cleaned_name
def delete(self, name):
name = self._normalize_name(self._clean_name(name))
self.bucket.delete_key(self._encode_name(name))
def exists(self, name):
name = self._normalize_name(self._clean_name(name))
k = self.bucket.new_key(self._encode_name(name))
return k.exists()
def listdir(self, name):
name = self._normalize_name(self._clean_name(name))
dirlist = self.bucket.list(self._encode_name(name))
files = []
dirs = set()
base_parts = name.split("/") if name else []
for item in dirlist:
parts = item.name.split("/")
parts = parts[len(base_parts):]
if len(parts) == 1:
# File
files.append(parts[0])
elif len(parts) > 1:
# Directory
dirs.add(parts[0])
return list(dirs),files
def size(self, name):
name = self._normalize_name(self._clean_name(name))
return self.bucket.get_key(self._encode_name(name)).size
def url(self, name):
name = self._normalize_name(self._clean_name(name))
if self.custom_domain:
return "%s://%s/%s" % ('https' if self.secure_urls else 'http', self.custom_domain, name)
else:
return self.connection.generate_url(self.querystring_expire, method='GET', \
bucket=self.bucket.name, key=self._encode_name(name), query_auth=self.querystring_auth, \
force_http=not self.secure_urls)
def get_available_name(self, name):
""" Overwrite existing file with the same name. """
if FILE_OVERWRITE:
name = self._clean_name(name)
return name
return super(S3BotoStorage, self).get_available_name(name)
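# Minimal usage sketch (assumes the AWS_* settings above are configured; the
# file name and contents are made-up values):
#
#     from django.core.files.base import ContentFile
#     storage = S3BotoStorage()
#     name = storage.save('docs/hello.txt', ContentFile('hello world'))
#     url = storage.url(name)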
class S3BotoStorageFile(File):
def __init__(self, name, mode, storage):
self._storage = storage
self.name = name[len(self._storage.location):].lstrip('/')
self._mode = mode
self.key = storage.bucket.get_key(self._storage._encode_name(name))
self._is_dirty = False
self._file = None
@property
def size(self):
return self.key.size
@property
def file(self):
if self._file is None:
self._file = StringIO()
if 'r' in self._mode:
self._is_dirty = False
self.key.get_contents_to_file(self._file)
self._file.seek(0)
return self._file
def read(self, *args, **kwargs):
if 'r' not in self._mode:
raise AttributeError("File was not opened in read mode.")
return super(S3BotoStorageFile, self).read(*args, **kwargs)
def write(self, *args, **kwargs):
if 'w' not in self._mode:
raise AttributeError("File was opened for read-only access.")
self._is_dirty = True
return super(S3BotoStorageFile, self).write(*args, **kwargs)
def close(self):
if self._is_dirty:
self.key.set_contents_from_file(self._file, headers=self._storage.headers, policy=self._storage.acl)
self.key.close()
|
Hasky0911/Maoyan
|
refs/heads/master
|
node_modules/node-gyp/gyp/tools/pretty_sln.py
|
1831
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
    # For each dependency in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
  # Check that we have at least one parameter (the .sln path).
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
|
petosegan/scikit-learn
|
refs/heads/master
|
sklearn/random_projection.py
|
207
|
# -*- coding: utf8
"""Random Projection transformers
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>,
# Arnaud Joly <a.joly@ulg.ac.be>
# License: BSD 3 clause
from __future__ import division
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
from .externals import six
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_array, NotFittedError
from .utils import DataDimensionalityWarning
__all__ = ["SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim"]
def johnson_lindenstrauss_min_dim(n_samples, eps=0.1):
"""Find a 'safe' number of components to randomly project to
The distortion introduced by a random projection `p` only changes the
    distance between two points by a factor (1 +- eps) in a Euclidean space
with good probability. The projection `p` is an eps-embedding as defined
by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features], eps is in ]0, 1[ and p is a projection by a random Gaussian
N(0, 1) matrix with shape [n_components, n_features] (or a sparse
Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
Note that the number of dimensions is independent of the original
number of features but instead depends on the size of the dataset:
the larger the dataset, the higher is the minimal dimensionality of
an eps-embedding.
Read more in the :ref:`User Guide <johnson_lindenstrauss>`.
Parameters
----------
n_samples : int or numpy array of int greater than 0,
Number of samples. If an array is given, it will compute
a safe number of components array-wise.
eps : float or numpy array of float in ]0,1[, optional (default=0.1)
Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.
If an array is given, it will compute a safe number of components
array-wise.
Returns
-------
n_components : int or numpy array of int,
The minimal number of components to guarantee with good probability
an eps-embedding with n_samples.
Examples
--------
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
References
----------
.. [1] http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
.. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,
"An elementary proof of the Johnson-Lindenstrauss Lemma."
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654
"""
eps = np.asarray(eps)
n_samples = np.asarray(n_samples)
if np.any(eps <= 0.0) or np.any(eps >= 1):
raise ValueError(
"The JL bound is defined for eps in ]0, 1[, got %r" % eps)
    if np.any(n_samples <= 0):
raise ValueError(
"The JL bound is defined for n_samples greater than zero, got %r"
% n_samples)
denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
return (4 * np.log(n_samples) / denominator).astype(np.int)
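# Worked instance of the bound above (plain arithmetic, for illustration):
# with n_samples = 1e6 and eps = 0.5,
#     4 * log(1e6) / (0.5 ** 2 / 2 - 0.5 ** 3 / 3)
#         = 55.262... / 0.08333... ~= 663.1,
# which truncates to the 663 shown in the Examples section above.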
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == 'auto':
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r"
% density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation"""
if n_components <= 0:
raise ValueError("n_components must be strictly positive, got %d" %
n_components)
    if n_features <= 0:
        raise ValueError("n_features must be strictly positive, got %d" %
                         n_features)
def gaussian_random_matrix(n_components, n_features, random_state=None):
""" Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components : numpy array of shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
sparse_random_matrix
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
def sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range ]0, 1] or 'auto', optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components: numpy array or CSR matrix with shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
SparseRandomProjection
gaussian_random_matrix
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://www.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for i in xrange(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
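# Quick sanity sketch of the distribution above (the shapes are chosen for
# illustration): with n_components=100 and density=0.01 the non-zero values
# are +/- sqrt(1 / density) / sqrt(n_components) == 10 / 10 == 1, so:
#
#     m = sparse_random_matrix(100, 1000, density=0.01, random_state=0)
#     assert set(np.unique(m.data)) <= set([-1.0, 1.0])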
class BaseRandomProjection(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, n_components='auto', eps=0.1, dense_output=False,
random_state=None):
self.n_components = n_components
self.eps = eps
self.dense_output = dense_output
self.random_state = random_state
self.components_ = None
self.n_components_ = None
@abstractmethod
    def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
def fit(self, X, y=None):
"""Generate a sparse random projection matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
            aforementioned papers.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples, n_features = X.shape
if self.n_components == 'auto':
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps)
if self.n_components_ <= 0:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is invalid' % (
self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is larger than the original space with '
'n_features=%d' % (self.eps, n_samples, self.n_components_,
n_features))
else:
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s"
% self.n_components_)
elif self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components),
DataDimensionalityWarning)
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(self.n_components_,
n_features)
# Check contract
assert_equal(
self.components_.shape,
(self.n_components_, n_features),
            err_msg=('An error has occurred: the self.components_ matrix does '
                     'not have the proper shape.'))
return self
def transform(self, X, y=None):
"""Project the data by using matrix product with the random matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
The input data to project into a smaller dimensional space.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array or scipy sparse of shape [n_samples, n_components]
Projected array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
if self.components_ is None:
raise NotFittedError('No random projection matrix had been fit.')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
                'Impossible to perform projection: '
'X at fit stage had a different number of features. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
X_new = safe_sparse_dot(X, self.components_.T,
dense_output=self.dense_output)
return X_new
class GaussianRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through Gaussian random projection
The components of the random matrix are drawn from N(0, 1 / n_components).
Read more in the :ref:`User Guide <gaussian_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
It should be noted that Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
eps : strictly positive float, optional (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : numpy array of shape [n_components, n_features]
Random matrix used for the projection.
See Also
--------
SparseRandomProjection
"""
def __init__(self, n_components='auto', eps=0.1, random_state=None):
super(GaussianRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=True,
random_state=random_state)
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
return gaussian_random_matrix(n_components,
n_features,
random_state=random_state)
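# Minimal usage sketch (the array shape is an assumption for illustration):
#
#     import numpy as np
#     X = np.random.rand(100, 10000)
#     transformer = GaussianRandomProjection(random_state=0)
#     X_new = transformer.fit_transform(X)
#     X_new.shape  # (100, 3947): the JL bound for n_samples=100, eps=0.1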
class SparseRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through sparse random projection
Sparse random matrix is an alternative to dense random
projection matrix that guarantees similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
If we note `s = 1 / density` the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Read more in the :ref:`User Guide <sparse_random_matrix>`.
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
It should be noted that Johnson-Lindenstrauss lemma can yield
        very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
density : float in range ]0, 1], optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
eps : strictly positive float, optional, (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
dense_output : boolean, optional (default=False)
If True, ensure that the output of the random projection is a
dense numpy array even if the input and random projection matrix
are both sparse. In practice, if the number of components is
small the number of zero components in the projected data will
be very small and it will be more CPU and memory efficient to
use a dense representation.
If False, the projected data uses a sparse representation if
the input is sparse.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
    n_components_ : int
Concrete number of components computed when n_components="auto".
components_ : CSR matrix with shape [n_components, n_features]
Random matrix used for the projection.
density_ : float in range 0.0 - 1.0
        Concrete density computed when density = "auto".
See Also
--------
GaussianRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://www.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
def __init__(self, n_components='auto', density='auto', eps=0.1,
dense_output=False, random_state=None):
super(SparseRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=dense_output,
random_state=random_state)
self.density = density
self.density_ = None
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return sparse_random_matrix(n_components,
n_features,
density=self.density_,
random_state=random_state)
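# Minimal usage sketch (same illustrative shapes as the Gaussian example):
#
#     transformer = SparseRandomProjection(random_state=0)
#     X_new = transformer.fit_transform(np.random.rand(100, 10000))
#     X_new.shape            # (100, 3947)
#     transformer.density_   # 0.01 == 1 / sqrt(10000)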
|
Just-D/chromium-1
|
refs/heads/master
|
tools/telemetry/third_party/gsutilz/third_party/boto/tests/unit/cloudtrail/test_layer1.py
|
91
|
#!/usr/bin/env python
import json
from boto.cloudtrail.layer1 import CloudTrailConnection
from tests.unit import AWSMockServiceTestCase
class TestDescribeTrails(AWSMockServiceTestCase):
connection_class = CloudTrailConnection
def default_body(self):
return b'''
{"trailList":
[
{
"IncludeGlobalServiceEvents": false,
"Name": "test",
"SnsTopicName": "cloudtrail-1",
"S3BucketName": "cloudtrail-1"
}
]
}'''
def test_describe(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.describe_trails()
self.assertEqual(1, len(api_response['trailList']))
self.assertEqual('test', api_response['trailList'][0]['Name'])
self.assert_request_parameters({})
target = self.actual_request.headers['X-Amz-Target']
self.assertTrue('DescribeTrails' in target)
def test_describe_name_list(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.describe_trails(
trail_name_list=['test'])
self.assertEqual(1, len(api_response['trailList']))
self.assertEqual('test', api_response['trailList'][0]['Name'])
self.assertEqual(json.dumps({
'trailNameList': ['test']
}), self.actual_request.body.decode('utf-8'))
target = self.actual_request.headers['X-Amz-Target']
self.assertTrue('DescribeTrails' in target)
class TestCreateTrail(AWSMockServiceTestCase):
connection_class = CloudTrailConnection
def default_body(self):
return b'''
{"trail":
{
"IncludeGlobalServiceEvents": false,
"Name": "test",
"SnsTopicName": "cloudtrail-1",
"S3BucketName": "cloudtrail-1"
}
}'''
def test_create(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_trail(
'test', 'cloudtrail-1', sns_topic_name='cloudtrail-1',
include_global_service_events=False)
self.assertEqual('test', api_response['trail']['Name'])
self.assertEqual('cloudtrail-1', api_response['trail']['S3BucketName'])
self.assertEqual('cloudtrail-1', api_response['trail']['SnsTopicName'])
self.assertEqual(False,
api_response['trail']['IncludeGlobalServiceEvents'])
target = self.actual_request.headers['X-Amz-Target']
self.assertTrue('CreateTrail' in target)
|
bbengfort/hadoop-fundamentals
|
refs/heads/master
|
wines/wines.py
|
5
|
## Spark Application for performing SGD regression on wines.
import csv
from numpy import array
from StringIO import StringIO
from pyspark import SparkConf, SparkContext
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
# Load and parse the data
def parsePoint(line):
values = csv.reader(StringIO(line), delimiter=";").next() # CSV parsing of line
values = [float(x) for x in values] # Cast to all floats
return LabeledPoint(values[-1], values[:-1]) # y = quality, X = row[:-1]
if __name__ == '__main__':
conf = SparkConf().setMaster("local[*]").setAppName("Wine Regression")
sc = SparkContext(conf=conf)
wines = sc.textFile("winequality-red.csv")
parsedData = wines.map(parsePoint)
# Build the model
model = LinearRegressionWithSGD.train(parsedData)
# Evaluate the model on training data
valuesAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
    # reduce() already returns the summed squared error, so divide it directly
    MSE = valuesAndPreds.map(lambda (v, p): (v - p)**2).reduce(lambda x, y: x + y) / valuesAndPreds.count()
print("Mean Squared Error = " + str(MSE))
|
vstoykov/django-hvad
|
refs/heads/master
|
hvad/tests/serialization.py
|
7
|
import pickle
from django.utils import translation
from hvad.test_utils.testcase import HvadTestCase
from hvad.test_utils.project.app.models import Normal
class PicklingTest(HvadTestCase):
def test_untranslated_new_object_can_be_pickled(self):
normal = Normal(shared_field="Shared")
serialized_repr = pickle.dumps(normal)
unpickled = pickle.loads(serialized_repr)
self.assertEqual(normal.shared_field, unpickled.shared_field)
def test_translated_new_object_can_be_pickled(self):
normal = Normal(shared_field="Shared")
normal.translate("en")
normal.translated_field = "English"
serialized_repr = pickle.dumps(normal)
unpickled = pickle.loads(serialized_repr)
self.assertEqual(normal.shared_field, unpickled.shared_field)
self.assertEqual(normal.language_code, unpickled.language_code)
self.assertEqual(normal.translated_field, unpickled.translated_field)
def test_untranslated_object_can_be_pickled(self):
normal = Normal.objects.create(
shared_field="Shared",
)
serialized_repr = pickle.dumps(normal)
unpickled = pickle.loads(serialized_repr)
self.assertEqual(normal.shared_field, unpickled.shared_field)
def test_translated_object_can_be_pickled(self):
with translation.override('en'):
normal = Normal.objects.create(
shared_field="Shared",
translated_field = "English",
)
serialized_repr = pickle.dumps(normal)
unpickled = pickle.loads(serialized_repr)
self.assertEqual(normal.shared_field, unpickled.shared_field)
self.assertEqual(normal.language_code, unpickled.language_code)
self.assertEqual(normal.translated_field, unpickled.translated_field)
def test_queryset_can_be_pickled(self):
normal = Normal.objects.create(
shared_field="Shared",
)
qs = Normal.objects.all()
serialized_repr = pickle.dumps(qs)
unpickled = pickle.loads(serialized_repr)
self.assertEqual(unpickled.model, qs.model)
self.assertEqual(unpickled.get(pk=normal.pk), normal)
def test_queryset_with_translated_objects_can_be_pickled(self):
with translation.override('en'):
normal = Normal.objects.create(
shared_field="Shared",
translated_field = "English",
)
qs = Normal.objects.all()
serialized_repr = pickle.dumps(qs)
unpickled = pickle.loads(serialized_repr)
self.assertEqual(unpickled.model, qs.model)
self.assertEqual(unpickled.get(pk=normal.pk), normal)
def test_translated_queryset_with_translated_objects_can_be_pickled(self):
with translation.override('en'):
normal = Normal.objects.create(
shared_field="Shared",
translated_field = "English",
)
qs = Normal.objects.language('en').all()
serialized_repr = pickle.dumps(qs)
unpickled = pickle.loads(serialized_repr)
self.assertEqual(unpickled.model, qs.model)
self.assertEqual(unpickled.get(pk=normal.pk), normal)
|
PhonologicalCorpusTools/PyAnnotationGraph
|
refs/heads/master
|
tests/test_lexical.py
|
3
|
import pytest
from polyglotdb import CorpusContext
def test_lexicon_enrichment(timed_config, timed_lexicon_enrich_file):
with CorpusContext(timed_config) as c:
c.enrich_lexicon_from_csv(timed_lexicon_enrich_file)
q = c.query_graph(c.word).filter(c.word.neighborhood_density < 10)
q = q.columns(c.word.label.column_name('label'))
res = q.all()
assert (all(x['label'] == 'guess' for x in res))
q = c.query_graph(c.word).filter(c.word.label == 'i')
res = q.all()
assert (res[0]['frequency'] == 150)
assert (res[0]['part_of_speech'] == 'PRP')
assert (res[0]['neighborhood_density'] == 17)
q = c.query_graph(c.word).filter(c.word.label == 'cute')
res = q.all()
assert (res[0]['frequency'] is None)
assert (res[0]['part_of_speech'] == 'JJ')
assert (res[0]['neighborhood_density'] == 14)
# currently unsupported
levels = c.query_metadata(c.word).levels(c.word.part_of_speech)
assert set(levels) == {None, 'NN', 'VB', 'JJ', 'IN', 'PRP'}
def test_reset_enrich_lexicon(timed_config, timed_lexicon_enrich_file):
with CorpusContext(timed_config) as g:
g.reset_lexicon_csv(timed_lexicon_enrich_file)
assert ('frequency', int) not in g.hierarchy.type_properties['word']
statement = '''MATCH (n:word_type:{}) where n.frequency > 0 return count(n) as c'''.format(g.cypher_safe_name)
res = g.execute_cypher(statement)
for r in res:
assert r['c'] == 0
|
krasota/helloworld
|
refs/heads/master
|
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common_test.py
|
2542
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
    self.assertFlavor('solaris', 'sunos5' , {})
    self.assertFlavor('solaris', 'sunos'  , {})
    self.assertFlavor('linux'  , 'linux2' , {})
    self.assertFlavor('linux'  , 'linux3' , {})
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
|
laperry1/android_external_chromium_org
|
refs/heads/cm-12.1
|
tools/valgrind/drmemory_analyze.py
|
53
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# drmemory_analyze.py
''' Given a Dr. Memory output file, parses errors and uniques them.'''
from collections import defaultdict
import common
import hashlib
import logging
import optparse
import os
import re
import subprocess
import sys
import time
class DrMemoryError:
def __init__(self, report, suppression, testcase):
self._report = report
self._testcase = testcase
# Chromium-specific transformations of the suppressions:
# Replace 'any_test.exe' and 'chrome.dll' with '*', then remove the
# Dr.Memory-generated error ids from the name= lines as they don't
# make sense in a multiprocess report.
supp_lines = suppression.split("\n")
for l in xrange(len(supp_lines)):
if supp_lines[l].startswith("name="):
supp_lines[l] = "name=<insert_a_suppression_name_here>"
if supp_lines[l].startswith("chrome.dll!"):
supp_lines[l] = supp_lines[l].replace("chrome.dll!", "*!")
bang_index = supp_lines[l].find("!")
d_exe_index = supp_lines[l].find(".exe!")
if bang_index >= 4 and d_exe_index + 4 == bang_index:
supp_lines[l] = "*" + supp_lines[l][bang_index:]
self._suppression = "\n".join(supp_lines)
def __str__(self):
output = self._report + "\n"
if self._testcase:
output += "The report came from the `%s` test.\n" % self._testcase
output += "Suppression (error hash=#%016X#):\n" % self.ErrorHash()
output += (" For more info on using suppressions see "
"http://dev.chromium.org/developers/how-tos/using-drmemory#TOC-Suppressing-error-reports-from-the-\n")
output += "{\n%s\n}\n" % self._suppression
return output
# This is a device-independent hash identifying the suppression.
# By printing out this hash we can find duplicate reports between tests and
# different shards running on multiple buildbots
def ErrorHash(self):
return int(hashlib.md5(self._suppression).hexdigest()[:16], 16)
def __hash__(self):
return hash(self._suppression)
def __eq__(self, rhs):
return self._suppression == rhs
class DrMemoryAnalyzer:
''' Given a set of Dr.Memory output files, parse all the errors out of
them, unique them and output the results.'''
def __init__(self):
self.known_errors = set()
self.error_count = 0
def ReadLine(self):
self.line_ = self.cur_fd_.readline()
def ReadSection(self):
result = [self.line_]
self.ReadLine()
while len(self.line_.strip()) > 0:
result.append(self.line_)
self.ReadLine()
return result
def ParseReportFile(self, filename, testcase):
ret = []
# First, read the generated suppressions file so we can easily lookup a
# suppression for a given error.
supp_fd = open(filename.replace("results", "suppress"), 'r')
generated_suppressions = {} # Key -> Error #, Value -> Suppression text.
for line in supp_fd:
# NOTE: this regexp looks fragile. Might break if the generated
# suppression format slightly changes.
m = re.search("# Suppression for Error #([0-9]+)", line.strip())
if not m:
continue
error_id = int(m.groups()[0])
assert error_id not in generated_suppressions
# OK, now read the next suppression:
cur_supp = ""
for supp_line in supp_fd:
if supp_line.startswith("#") or supp_line.strip() == "":
break
cur_supp += supp_line
generated_suppressions[error_id] = cur_supp.strip()
supp_fd.close()
self.cur_fd_ = open(filename, 'r')
while True:
self.ReadLine()
if (self.line_ == ''): break
match = re.search("^Error #([0-9]+): (.*)", self.line_)
if match:
error_id = int(match.groups()[0])
self.line_ = match.groups()[1].strip() + "\n"
report = "".join(self.ReadSection()).strip()
suppression = generated_suppressions[error_id]
ret.append(DrMemoryError(report, suppression, testcase))
if re.search("SUPPRESSIONS USED:", self.line_):
self.ReadLine()
while self.line_.strip() != "":
line = self.line_.strip()
(count, name) = re.match(" *([0-9\?]+)x(?: \(.*?\))?: (.*)",
line).groups()
if (count == "?"):
# Whole-module suppressions have no count available; assume 1.
count = 1
else:
count = int(count)
self.used_suppressions[name] += count
self.ReadLine()
if self.line_.startswith("ASSERT FAILURE"):
ret.append(self.line_.strip())
self.cur_fd_.close()
return ret
def Report(self, filenames, testcase, check_sanity):
sys.stdout.flush()
# TODO(timurrrr): support positive tests / check_sanity==True
self.used_suppressions = defaultdict(int)
to_report = []
reports_for_this_test = set()
for f in filenames:
cur_reports = self.ParseReportFile(f, testcase)
# Filter out the reports that were there in previous tests.
for r in cur_reports:
if r in reports_for_this_test:
# A similar report is about to be printed for this test.
pass
elif r in self.known_errors:
# A similar report has already been printed in one of the prev tests.
to_report.append("This error was already printed in some "
"other test, see 'hash=#%016X#'" % r.ErrorHash())
reports_for_this_test.add(r)
else:
self.known_errors.add(r)
reports_for_this_test.add(r)
to_report.append(r)
common.PrintUsedSuppressionsList(self.used_suppressions)
if not to_report:
logging.info("PASS: No error reports found")
return 0
sys.stdout.flush()
sys.stderr.flush()
logging.info("Found %i error reports" % len(to_report))
for report in to_report:
self.error_count += 1
logging.info("Report #%d\n%s" % (self.error_count, report))
logging.info("Total: %i error reports" % len(to_report))
sys.stdout.flush()
return -1
def main():
'''For testing only. The DrMemoryAnalyzer class should be imported instead.'''
parser = optparse.OptionParser("usage: %prog <files to analyze>")
(options, args) = parser.parse_args()
if len(args) == 0:
parser.error("no filename specified")
filenames = args
logging.getLogger().setLevel(logging.INFO)
return DrMemoryAnalyzer().Report(filenames, None, False)
if __name__ == '__main__':
sys.exit(main())
|
bclau/nova
|
refs/heads/master
|
nova/tests/virt/disk/test_api.py
|
11
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
import fixtures
from nova import test
from nova.virt.disk import api
class APITestCase(test.NoDBTestCase):
def test_can_resize_need_fs_type_specified(self):
# NOTE(mikal): Bug 1094373 saw a regression where we failed to
# treat a failure to mount as a failure to be able to resize the
# filesystem
def _fake_get_disk_size(path):
return 10
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.disk.api.get_disk_size', _fake_get_disk_size))
def fake_trycmd(*args, **kwargs):
return '', 'broken'
self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
def fake_returns_true(*args, **kwargs):
return True
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.disk.mount.nbd.NbdMount.get_dev',
fake_returns_true))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.disk.mount.nbd.NbdMount.map_dev',
fake_returns_true))
# Force the use of localfs, which is what was used during the failure
# reported in the bug
def fake_import_fails(*args, **kwargs):
raise Exception('Failed')
self.useFixture(fixtures.MonkeyPatch(
'nova.openstack.common.importutils.import_module',
fake_import_fails))
imgfile = tempfile.NamedTemporaryFile()
self.addCleanup(imgfile.close)
self.assertFalse(api.is_image_partitionless(imgfile, use_cow=True))
|
loulich/Couchpotato
|
refs/heads/master
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/newstube.py
|
37
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class NewstubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?newstube\.ru/media/(?P<id>.+)'
_TEST = {
'url': 'http://www.newstube.ru/media/telekanal-cnn-peremestil-gorod-slavyansk-v-krym',
'info_dict': {
'id': '728e0ef2-e187-4012-bac0-5a081fdcb1f6',
'ext': 'flv',
'title': 'Телеканал CNN переместил город Славянск в Крым',
'description': 'md5:419a8c9f03442bc0b0a794d689360335',
'duration': 31.05,
},
'params': {
# rtmp download
'skip_download': True,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage(url, video_id, 'Downloading page')
video_guid = self._html_search_regex(
r'<meta property="og:video" content="https?://(?:www\.)?newstube\.ru/freshplayer\.swf\?guid=(?P<guid>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})',
page, 'video GUID')
player = self._download_xml(
'http://p.newstube.ru/v2/player.asmx/GetAutoPlayInfo6?state=&url=%s&sessionId=&id=%s&placement=profile&location=n2' % (url, video_guid),
video_guid, 'Downloading player XML')
def ns(s):
return s.replace('/', '/%(ns)s') % {'ns': '{http://app1.newstube.ru/N2SiteWS/player.asmx}'}
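# For example, ns('./ErrorMessage') expands to
# './{http://app1.newstube.ru/N2SiteWS/player.asmx}ErrorMessage', injecting
# the player's XML namespace after every '/' in the path.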
error_message = player.find(ns('./ErrorMessage'))
if error_message is not None:
raise ExtractorError('%s returned error: %s' % (self.IE_NAME, error_message.text), expected=True)
session_id = player.find(ns('./SessionId')).text
media_info = player.find(ns('./Medias/MediaInfo'))
title = media_info.find(ns('./Name')).text
description = self._og_search_description(page)
thumbnail = media_info.find(ns('./KeyFrame')).text
duration = int(media_info.find(ns('./Duration')).text) / 1000.0
formats = []
for stream_info in media_info.findall(ns('./Streams/StreamInfo')):
media_location = stream_info.find(ns('./MediaLocation'))
if media_location is None:
continue
server = media_location.find(ns('./Server')).text
app = media_location.find(ns('./App')).text
media_id = stream_info.find(ns('./Id')).text
quality_id = stream_info.find(ns('./QualityId')).text
name = stream_info.find(ns('./Name')).text
width = int(stream_info.find(ns('./Width')).text)
height = int(stream_info.find(ns('./Height')).text)
formats.append({
'url': 'rtmp://%s/%s' % (server, app),
'app': app,
'play_path': '01/%s' % video_guid.upper(),
'rtmp_conn': ['S:%s' % session_id, 'S:%s' % media_id, 'S:n2'],
'page_url': url,
'ext': 'flv',
'format_id': quality_id,
'format_note': name,
'width': width,
'height': height,
})
self._sort_formats(formats)
return {
'id': video_guid,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
|
rameshvs/nipype
|
refs/heads/master
|
nipype/interfaces/slicer/filtering/denoising.py
|
15
|
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class GradientAnisotropicDiffusionInputSpec(CommandLineInputSpec):
conductance = traits.Float(desc="Conductance controls the sensitivity of the conductance term. As a general rule, the lower the value, the more strongly the filter preserves edges. A high value will cause diffusion (smoothing) across edges. Note that the number of iterations controls how much smoothing is done within regions bounded by edges.", argstr="--conductance %f")
iterations = traits.Int(desc="The more iterations, the more smoothing. Each iteration takes the same amount of time. If it takes 10 seconds for one iteration, then it will take 100 seconds for 10 iterations. Note that the conductance controls how much each iteration smooths across edges.", argstr="--iterations %d")
timeStep = traits.Float(desc="The time step depends on the dimensionality of the image. In Slicer the images are 3D and the default (.0625) time step will provide a stable solution.", argstr="--timeStep %f")
inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s")
outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s")
class GradientAnisotropicDiffusionOutputSpec(TraitedSpec):
outputVolume = File(position=-1, desc="Output filtered", exists=True)
class GradientAnisotropicDiffusion(SEMLikeCommandLine):
"""title: Gradient Anisotropic Diffusion
category: Filtering.Denoising
description: Runs gradient anisotropic diffusion on a volume.
Anisotropic diffusion methods reduce noise (or unwanted detail) in images while preserving specific image features, like edges. For many applications, there is an assumption that light-dark transitions (edges) are interesting. Standard isotropic diffusion methods move and blur light-dark boundaries. Anisotropic diffusion methods are formulated to specifically preserve edges. The conductance term for this implementation is a function of the gradient magnitude of the image at each point, reducing the strength of diffusion at edges. The numerical implementation of this equation is similar to that described in the Perona-Malik paper, but uses a more robust technique for gradient magnitude estimation and has been generalized to N-dimensions.
version: 0.1.0.$Revision: 19608 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GradientAnisotropicDiffusion
contributor: Bill Lorensen (GE)
acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium
"""
input_spec = GradientAnisotropicDiffusionInputSpec
output_spec = GradientAnisotropicDiffusionOutputSpec
_cmd = "GradientAnisotropicDiffusion "
_outputs_filenames = {'outputVolume':'outputVolume.nii'}
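# Minimal usage sketch (hypothetical file names; assumes the Slicer CLI
# binary is on PATH, as SEMLikeCommandLine requires):
#   gad = GradientAnisotropicDiffusion()
#   gad.inputs.inputVolume = 'in.nii'
#   gad.inputs.conductance = 1.0
#   gad.inputs.iterations = 5
#   gad.inputs.outputVolume = True  # auto-name output as outputVolume.nii
#   gad.run()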
class CurvatureAnisotropicDiffusionInputSpec(CommandLineInputSpec):
conductance = traits.Float(desc="Conductance controls the sensitivity of the conductance term. As a general rule, the lower the value, the more strongly the filter preserves edges. A high value will cause diffusion (smoothing) across edges. Note that the number of iterations controls how much smoothing is done within regions bounded by edges.", argstr="--conductance %f")
iterations = traits.Int(desc="The more iterations, the more smoothing. Each iteration takes the same amount of time. If it takes 10 seconds for one iteration, then it will take 100 seconds for 10 iterations. Note that the conductance controls how much each iteration smooths across edges.", argstr="--iterations %d")
timeStep = traits.Float(desc="The time step depends on the dimensionality of the image. In Slicer the images are 3D and the default (.0625) time step will provide a stable solution.", argstr="--timeStep %f")
inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s")
outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s")
class CurvatureAnisotropicDiffusionOutputSpec(TraitedSpec):
outputVolume = File(position=-1, desc="Output filtered", exists=True)
class CurvatureAnisotropicDiffusion(SEMLikeCommandLine):
"""title: Curvature Anisotropic Diffusion
category: Filtering.Denoising
description: Performs anisotropic diffusion on an image using a modified curvature diffusion equation (MCDE).
MCDE does not exhibit the edge enhancing properties of classic anisotropic diffusion, which can under certain conditions undergo a 'negative' diffusion, which enhances the contrast of edges. Equations of the form of MCDE always undergo positive diffusion, with the conductance term only varying the strength of that diffusion.
Qualitatively, MCDE compares well with other non-linear diffusion techniques. It is less sensitive to contrast than classic Perona-Malik style diffusion, and preserves finer detailed structures in images. There is a potential speed trade-off for using this function in place of Gradient Anisotropic Diffusion. Each iteration of the solution takes roughly twice as long. Fewer iterations, however, may be required to reach an acceptable solution.
version: 0.1.0.$Revision: 19608 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/CurvatureAnisotropicDiffusion
contributor: Bill Lorensen (GE)
acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium
"""
input_spec = CurvatureAnisotropicDiffusionInputSpec
output_spec = CurvatureAnisotropicDiffusionOutputSpec
_cmd = "CurvatureAnisotropicDiffusion "
_outputs_filenames = {'outputVolume':'outputVolume.nii'}
class GaussianBlurImageFilterInputSpec(CommandLineInputSpec):
sigma = traits.Float(desc="Sigma value in physical units (e.g., mm) of the Gaussian kernel", argstr="--sigma %f")
inputVolume = File(position=-2, desc="Input volume", exists=True, argstr="%s")
outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Blurred Volume", argstr="%s")
class GaussianBlurImageFilterOutputSpec(TraitedSpec):
outputVolume = File(position=-1, desc="Blurred Volume", exists=True)
class GaussianBlurImageFilter(SEMLikeCommandLine):
"""title: Gaussian Blur Image Filter
category: Filtering.Denoising
description: Apply a Gaussian blur to an image
version: 0.1.0.$Revision: 1.1 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GaussianBlurImageFilter
contributor: Julien Jomier (Kitware), Stephen Aylward (Kitware)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = GaussianBlurImageFilterInputSpec
output_spec = GaussianBlurImageFilterOutputSpec
_cmd = "GaussianBlurImageFilter "
_outputs_filenames = {'outputVolume':'outputVolume.nii'}
class MedianImageFilterInputSpec(CommandLineInputSpec):
neighborhood = InputMultiPath(traits.Int, desc="The size of the neighborhood in each dimension", sep=",", argstr="--neighborhood %s")
inputVolume = File(position=-2, desc="Input volume to be filtered", exists=True, argstr="%s")
outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output filtered", argstr="%s")
class MedianImageFilterOutputSpec(TraitedSpec):
outputVolume = File(position=-1, desc="Output filtered", exists=True)
class MedianImageFilter(SEMLikeCommandLine):
"""title: Median Image Filter
category: Filtering.Denoising
description: The MedianImageFilter is commonly used as a robust approach for noise reduction. This filter is particularly efficient against "salt-and-pepper" noise. In other words, it is robust to the presence of gray-level outliers. MedianImageFilter computes the value of each output pixel as the statistical median of the neighborhood of values around the corresponding input pixel.
version: 0.1.0.$Revision: 19608 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MedianImageFilter
contributor: Bill Lorensen (GE)
acknowledgements: This command module was derived from Insight/Examples/Filtering/MedianImageFilter (copyright) Insight Software Consortium
"""
input_spec = MedianImageFilterInputSpec
output_spec = MedianImageFilterOutputSpec
_cmd = "MedianImageFilter "
_outputs_filenames = {'outputVolume':'outputVolume.nii'}
|
turbokongen/home-assistant
|
refs/heads/dev
|
homeassistant/components/limitlessled/__init__.py
|
36
|
"""The limitlessled component."""
|
kronicz/ecommerce-2
|
refs/heads/master
|
lib/python2.7/site-packages/pip/_vendor/ipaddress.py
|
198
|
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
import itertools
import struct
__version__ = '1.0.14'
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b'\0'[0] == 0: # Python 3 semantics
def _compat_bytes_to_byte_vals(byt):
return byt
else:
def _compat_bytes_to_byte_vals(byt):
return [struct.unpack(b'!B', b)[0] for b in byt]
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == 'big'
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == 'big'
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b'!I', intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
else:
raise NotImplementedError()
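# For example, _compat_to_bytes(3221225985, 4, 'big') == b'\xc0\x00\x02\x01'
# (the packed form of 192.0.2.1) on both Python 2 and Python 3.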
if hasattr(int, 'bit_length'):
# Not int.bit_length, since that won't work in 2.7 where long exists
def _compat_bit_length(i):
return i.bit_length()
else:
def _compat_bit_length(i):
for res in itertools.count():
if i >> res == 0:
return res
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
__slots__ = ()
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
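# For example (Python 3):
#   >>> ip_address('192.0.2.1')
#   IPv4Address('192.0.2.1')
#   >>> ip_address(3221225985)
#   IPv4Address('192.0.2.1')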
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
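# For example (Python 3):
#   >>> ip_network('192.0.2.0/24')
#   IPv4Network('192.0.2.0/24')
#   >>> ip_network('192.0.2.1/24')
#   Traceback (most recent call last):
#     ...
#   ValueError: 192.0.2.1/24 has host bits set
#   >>> ip_network('192.0.2.1/24', strict=False)
#   IPv4Network('192.0.2.0/24')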
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
address)
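# For example (Python 3):
#   >>> ip_interface('192.0.2.1/24')
#   IPv4Interface('192.0.2.1/24')
#   >>> ip_interface('192.0.2.1/24').network
#   IPv4Network('192.0.2.0/24')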
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split('/')
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
return min(bits, _compat_bit_length(~number & (number - 1)))
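# For example, _count_righthand_zero_bits(0b101000, 32) == 3, and
# _count_righthand_zero_bits(0, 32) == 32 since every bit is zero.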
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if (not (isinstance(first, _BaseAddress) and
isinstance(last, _BaseAddress))):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
first, last))
if first > last:
raise ValueError('last IP address must be greater than first')
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1)
net = ip((first_int, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
def _collapse_addresses_internal(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
# First merge
to_merge = list(addresses)
subnets = {}
while to_merge:
net = to_merge.pop()
supernet = net.supernet()
existing = subnets.get(supernet)
if existing is None:
subnets[supernet] = net
elif existing != net:
# Merge consecutive subnets
del subnets[supernet]
to_merge.append(supernet)
# Then iterate over resulting networks, skipping subsumed subnets
last = None
for net in sorted(subnets.values()):
if last is not None:
# Since they are sorted,
# last.network_address <= net.network_address is a given.
if last.broadcast_address >= net.broadcast_address:
continue
yield net
last = net
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets)
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
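# For example:
#   sorted([IPv4Network('192.0.2.0/24'), IPv4Address('192.0.2.1')],
#          key=get_mixed_type_key)
# sorts networks and addresses together instead of raising TypeError.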
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
__slots__ = ()
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def reverse_pointer(self):
"""The name of the reverse DNS pointer for the IP address, e.g.:
>>> ipaddress.ip_address("127.0.0.1").reverse_pointer
'1.0.0.127.in-addr.arpa'
>>> ipaddress.ip_address("2001:db8::1").reverse_pointer
'1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
"""
return self._reverse_pointer()
@property
def version(self):
msg = '%200s has no version specified' % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._max_prefixlen,
self._version))
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = (
'%r (len %d != %d) is not permitted as an IPv%d address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?'
)
raise AddressValueError(msg % (address, address_len,
expected_len, self._version))
@classmethod
def _ip_int_from_prefix(cls, prefixlen):
"""Turn the prefix length into a bitwise netmask
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
@classmethod
def _prefix_from_ip_int(cls, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = _compat_to_bytes(ip_int, byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen
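# For example, on the IPv4 classes _prefix_from_ip_int(0xffffff00) == 24,
# while 0xff00ff00 intermingles zeroes & ones and raises ValueError.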
@classmethod
def _report_invalid_netmask(cls, netmask_str):
msg = '%r is not a valid netmask' % netmask_str
raise NetmaskValueError(msg)
@classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except ValueError:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen
@classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
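# For example, on IPv4Network both _prefix_from_ip_string('255.255.255.0')
# (a netmask) and _prefix_from_ip_string('0.0.0.255') (the corresponding
# hostmask) return 24.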
def __reduce__(self):
return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
__slots__ = ()
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return (self._ip == other._ip and
self._version == other._version)
except AttributeError:
return NotImplemented
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseAddress):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
def __reduce__(self):
return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return '%s/%d' % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
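# For example, list(ip_network('192.0.2.0/30').hosts()) ==
# [IPv4Address('192.0.2.1'), IPv4Address('192.0.2.2')]; the network and
# broadcast addresses are excluded.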
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError
return self._address_class(broadcast + n)
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseNetwork):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (int(self.network_address) <= int(other._ip) <=
int(self.broadcast_address))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self)))
@property
def broadcast_address(self):
x = self._cache.get('broadcast_address')
if x is None:
x = self._address_class(int(self.network_address) |
int(self.hostmask))
self._cache['broadcast_address'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = '%200s has no associated address class' % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
addr1.address_exclude(addr2) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
addr1.address_exclude(addr2) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not other.subnet_of(self):
raise ValueError('%s not contained in %s' % (other, self))
if other == self:
return
# Make sure we're comparing the network of other.
other = other.__class__('%s/%s' % (other.network_address,
other.prefixlen))
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if other.subnet_of(s1):
yield s2
s1, s2 = s1.subnets()
elif other.subnet_of(s2):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
start = int(self.network_address)
end = int(self.broadcast_address)
step = (int(self.hostmask) + 1) >> prefixlen_diff
for new_addr in _compat_range(start, end, step):
current = self.__class__((new_addr, new_prefixlen))
yield current
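# For example, list(ip_network('192.0.2.0/24').subnets(new_prefix=26))
# yields the four /26 netblocks 192.0.2.0/26 through 192.0.2.192/26.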
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
new_prefixlen = self.prefixlen - prefixlen_diff
if new_prefixlen < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return self.__class__((
int(self.network_address) & (int(self.netmask) << prefixlen_diff),
new_prefixlen
))
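# For example, ip_network('192.0.2.0/24').supernet(prefixlen_diff=3)
# returns IPv4Network('192.0.0.0/21').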
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (self.network_address.is_multicast and
self.broadcast_address.is_multicast)
def subnet_of(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if (hasattr(other, 'network_address') and
hasattr(other, 'broadcast_address')):
return (other.network_address <= self.network_address and
other.broadcast_address >= self.broadcast_address)
# dealing with another address
else:
raise TypeError('Unable to test subnet containment with element '
'of type %s' % type(other))
def supernet_of(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if (hasattr(other, 'network_address') and
hasattr(other, 'broadcast_address')):
return (other.network_address >= self.network_address and
other.broadcast_address <= self.broadcast_address)
# dealing with another address
else:
raise TypeError('Unable to test subnet containment with element '
'of type %s' % type(other))
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self.network_address.is_reserved and
self.broadcast_address.is_reserved)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (self.network_address.is_link_local and
self.broadcast_address.is_link_local)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 4
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
_max_prefixlen = IPV4LENGTH
# There are only a handful of valid v4 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
def _explode_shorthand_ip_string(self):
return _compat_str(self)
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
try:
# Check for a netmask in prefix length form
prefixlen = cls._prefix_from_prefix_string(arg)
except NetmaskValueError:
# Check for a netmask or hostmask in dotted-quad form.
# This may raise NetmaskValueError.
prefixlen = cls._prefix_from_ip_string(arg)
netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
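# For example, _make_netmask(24), _make_netmask('24') and
# _make_netmask('255.255.255.0') all return
# (IPv4Address('255.255.255.0'), 24), and the tuple is cached per argument.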
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_octet(cls, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == '0':
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
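# For example, _parse_octet('0') == 0, while _parse_octet('08') (ambiguous
# octal/decimal), _parse_octet('256') (> 255) and _parse_octet('1234')
# (too long) all raise ValueError.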
@classmethod
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big'))
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
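# For example, _is_hostmask('0.0.0.255') is True while
# _is_hostmask('255.255.255.0') is False: hostmask octets ascend from 0,
# netmask octets descend from 255.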
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv4 address.
This implements the method described in RFC1035 3.5.
"""
reverse_octets = _compat_str(self).split('.')[::-1]
return '.'.join(reverse_octets) + '.in-addr.arpa'
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in self._constants._reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in self._constants._multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self == self._constants._unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in self._constants._loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in self._constants._linklocal_network
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv4Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv4Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
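    # A usage sketch for the properties above (illustrative, not part of the
    # original module):
    #     >>> iface = IPv4Interface('192.0.2.5/24')
    #     >>> iface.ip, iface.network
    #     (IPv4Address('192.0.2.5'), IPv4Network('192.0.2.0/24'))
    #     >>> iface.with_netmask, iface.with_hostmask
    #     ('192.0.2.5/255.255.255.0', '192.0.2.5/0.0.0.255')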
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
        .broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
              '192.0.2.0/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (_compat_int_types, bytes)):
self.network_address = IPv4Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
# fixme: address/network test here.
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
# We weren't given an address[1]
arg = self._max_prefixlen
self.network_address = IPv4Address(address[0])
self.netmask, self._prefixlen = self._make_netmask(arg)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv4Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv4Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
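    # A doctest-style sketch of the strict handling above (illustrative, not
    # part of the original module):
    #     >>> IPv4Network('192.0.2.1/24')
    #     Traceback (most recent call last):
    #       ...
    #     ValueError: 192.0.2.1/24 has host bits set
    #     >>> IPv4Network('192.0.2.1/24', strict=False)
    #     IPv4Network('192.0.2.0/24')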
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry.
"""
return (not (self.network_address in IPv4Network('100.64.0.0/10') and
self.broadcast_address in IPv4Network('100.64.0.0/10')) and
not self.is_private)
class _IPv4Constants(object):
_linklocal_network = IPv4Network('169.254.0.0/16')
_loopback_network = IPv4Network('127.0.0.0/8')
_multicast_network = IPv4Network('224.0.0.0/4')
_private_networks = [
IPv4Network('0.0.0.0/8'),
IPv4Network('10.0.0.0/8'),
IPv4Network('127.0.0.0/8'),
IPv4Network('169.254.0.0/16'),
IPv4Network('172.16.0.0/12'),
IPv4Network('192.0.0.0/29'),
IPv4Network('192.0.0.170/31'),
IPv4Network('192.0.2.0/24'),
IPv4Network('192.168.0.0/16'),
IPv4Network('198.18.0.0/15'),
IPv4Network('198.51.100.0/24'),
IPv4Network('203.0.113.0/24'),
IPv4Network('240.0.0.0/4'),
IPv4Network('255.255.255.255/32'),
]
_reserved_network = IPv4Network('240.0.0.0/4')
_unspecified_address = IPv4Address('0.0.0.0')
IPv4Address._constants = _IPv4Constants
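# A doctest-style sketch of the classification properties defined above
# (illustrative; the values follow the iana-ipv4-special-registry):
#     >>> IPv4Address('10.1.2.3').is_private
#     True
#     >>> IPv4Address('240.0.0.1').is_reserved
#     True
#     >>> IPv4Address('127.0.0.1').is_loopback
#     True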
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 6
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
_max_prefixlen = IPV6LENGTH
# There are only a bunch of valid v6 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
        Note that, unlike the IPv4 counterpart, dotted netmask strings such
        as "255.255.255.0" are not accepted here; only prefix lengths are.
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
prefixlen = cls._prefix_from_prefix_string(arg)
netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
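    # For example (illustrative): _make_netmask(32) and _make_netmask('32')
    # both yield (IPv6Address('ffff:ffff::'), 32); the result is cached so
    # later network constructions with the same argument skip the parsing.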
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (
_max_parts - 1, ip_str)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in _compat_range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
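    # E.g. (illustrative): the IPv4-suffix branch above makes
    # '::ffff:1.2.3.4' parse identically to '::ffff:102:304', and
    # _ip_int_from_string('::1') returns 1.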
@classmethod
def _parse_hextet(cls, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
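    # E.g. (illustrative): _parse_hextet('db8') == 0xdb8, while 'fffff'
    # (too long) and '12g4' (non-hex digit) both raise ValueError.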
@classmethod
def _compress_hextets(cls, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
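    # E.g. (illustrative): ['2001', 'db8', '0', '0', '0', '0', '0', '1']
    # becomes ['2001', 'db8', '', '1'], so that ':'.join(...) renders
    # '2001:db8::1'.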
@classmethod
def _string_from_ip_int(cls, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(cls._ip)
if ip_int > cls._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
hextets = cls._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = '%032x' % ip_int
parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return '%s/%d' % (':'.join(parts), self._prefixlen)
return ':'.join(parts)
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv6 address.
This implements the method described in RFC3596 2.5.
"""
reverse_chars = self.exploded[::-1].replace(':', '')
return '.'.join(reverse_chars) + '.ip6.arpa'
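    # E.g. (illustrative): for IPv6Address('2001:db8::1') this yields
    # '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'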
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in self._constants._multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return any(self in x for x in self._constants._reserved_networks)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in self._constants._linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in self._constants._sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv6-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
            A boolean, True if the address is not reserved per
iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
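    # E.g. (illustrative): IPv6Address('::ffff:192.0.2.1').ipv4_mapped
    # returns IPv4Address('192.0.2.1'); addresses outside ::ffff:0:0/96
    # return None.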
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
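    # E.g. (the example also used in the Python docs):
    #     >>> IPv6Address('2001:0:4136:e378:8000:63bf:3fff:fdd2').teredo
    #     (IPv4Address('65.54.227.120'), IPv4Address('192.0.2.45'))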
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
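    # E.g. (illustrative): IPv6Address('2002:c000:204::').sixtofour returns
    # IPv4Address('192.0.2.4'); addresses outside 2002::/16 return None.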
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv6Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
    Attributes: [examples for IPv6Network('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
          strict: A boolean. If true, ensure that we have been passed
            a true network address, e.g., 2001:db8::1000/124, and not an
            IP address on a network, e.g., 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer or packed address
if isinstance(address, (bytes, _compat_int_types)):
self.network_address = IPv6Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
self.network_address = IPv6Address(address[0])
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv6Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv6Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast + 1):
yield self._address_class(x)
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (self.network_address.is_site_local and
self.broadcast_address.is_site_local)
class _IPv6Constants(object):
_linklocal_network = IPv6Network('fe80::/10')
_multicast_network = IPv6Network('ff00::/8')
_private_networks = [
IPv6Network('::1/128'),
IPv6Network('::/128'),
IPv6Network('::ffff:0:0/96'),
IPv6Network('100::/64'),
IPv6Network('2001::/23'),
IPv6Network('2001:2::/48'),
IPv6Network('2001:db8::/32'),
IPv6Network('2001:10::/28'),
IPv6Network('fc00::/7'),
IPv6Network('fe80::/10'),
]
_reserved_networks = [
IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
IPv6Network('800::/5'), IPv6Network('1000::/4'),
IPv6Network('4000::/3'), IPv6Network('6000::/3'),
IPv6Network('8000::/3'), IPv6Network('A000::/3'),
IPv6Network('C000::/3'), IPv6Network('E000::/4'),
IPv6Network('F000::/5'), IPv6Network('F800::/6'),
IPv6Network('FE00::/9'),
]
_sitelocal_network = IPv6Network('fec0::/10')
IPv6Address._constants = _IPv6Constants
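# An end-to-end doctest-style sketch for the IPv6 classes above (illustrative):
#     >>> IPv6Interface('2001:db8::1/64').network
#     IPv6Network('2001:db8::/64')
#     >>> list(IPv6Network('2001:db8::/126').hosts())
#     [IPv6Address('2001:db8::1'), IPv6Address('2001:db8::2'), IPv6Address('2001:db8::3')]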
|
Mazecreator/tensorflow
|
refs/heads/master
|
tensorflow/contrib/ffmpeg/decode_audio_op_test.py
|
33
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for third_party.tensorflow.contrib.ffmpeg.decode_audio_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import six
from tensorflow.contrib import ffmpeg
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class DecodeAudioOpTest(test.TestCase):
def _loadFileAndTest(self, filename, file_format, duration_sec,
samples_per_second, channel_count,
samples_per_second_tensor=None, feed_dict=None):
"""Loads an audio file and validates the output tensor.
Args:
filename: The filename of the input file.
file_format: The format of the input file.
duration_sec: The duration of the audio contained in the file in seconds.
samples_per_second: The desired sample rate in the output tensor.
channel_count: The desired channel count in the output tensor.
samples_per_second_tensor: The value to pass to the corresponding
parameter in the instantiated `decode_audio` op. If not
provided, will default to a constant value of
`samples_per_second`. Useful for providing a placeholder.
feed_dict: Used when evaluating the `decode_audio` op. If not
provided, will be empty. Useful when providing a placeholder for
`samples_per_second_tensor`.
"""
if samples_per_second_tensor is None:
samples_per_second_tensor = samples_per_second
with self.test_session():
path = os.path.join(resource_loader.get_data_files_path(), 'testdata',
filename)
with open(path, 'rb') as f:
contents = f.read()
audio_op = ffmpeg.decode_audio(
contents,
file_format=file_format,
samples_per_second=samples_per_second_tensor,
channel_count=channel_count)
audio = audio_op.eval(feed_dict=feed_dict or {})
self.assertEqual(len(audio.shape), 2)
self.assertNear(
duration_sec * samples_per_second,
audio.shape[0],
# Duration should be specified within 10%:
0.1 * audio.shape[0])
self.assertEqual(audio.shape[1], channel_count)
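  # For instance (an illustrative reading of the helper above): decoding a
  # 0.57 s mono mp3 at 20000 samples/sec should give audio.shape[0] close to
  # 0.57 * 20000 = 11400 rows (within the 10% tolerance) and audio.shape[1] == 1.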
def testMonoMp3(self):
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000, 1)
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000, 2)
def testMonoMp4Mp3Codec(self):
# mp3 compressed audio streams in mp4 container.
self._loadFileAndTest('mono_16khz_mp3.mp4', 'mp4', 2.77, 20000, 1)
self._loadFileAndTest('mono_16khz_mp3.mp4', 'mp4', 2.77, 20000, 2)
def testMonoMp4AacCodec(self):
# aac compressed audio streams in mp4 container.
self._loadFileAndTest('mono_32khz_aac.mp4', 'mp4', 2.77, 20000, 1)
self._loadFileAndTest('mono_32khz_aac.mp4', 'mp4', 2.77, 20000, 2)
def testStereoMp3(self):
self._loadFileAndTest('stereo_48khz.mp3', 'mp3', 0.79, 50000, 1)
self._loadFileAndTest('stereo_48khz.mp3', 'mp3', 0.79, 20000, 2)
def testStereoMp4Mp3Codec(self):
# mp3 compressed audio streams in mp4 container.
self._loadFileAndTest('stereo_48khz_mp3.mp4', 'mp4', 0.79, 50000, 1)
self._loadFileAndTest('stereo_48khz_mp3.mp4', 'mp4', 0.79, 20000, 2)
def testStereoMp4AacCodec(self):
# aac compressed audio streams in mp4 container.
self._loadFileAndTest('stereo_48khz_aac.mp4', 'mp4', 0.79, 50000, 1)
self._loadFileAndTest('stereo_48khz_aac.mp4', 'mp4', 0.79, 20000, 2)
def testMonoWav(self):
self._loadFileAndTest('mono_10khz.wav', 'wav', 0.57, 5000, 1)
self._loadFileAndTest('mono_10khz.wav', 'wav', 0.57, 10000, 4)
def testOgg(self):
self._loadFileAndTest('mono_10khz.ogg', 'ogg', 0.57, 10000, 1)
def testInvalidFile(self):
with self.test_session():
contents = 'invalid file'
audio_op = ffmpeg.decode_audio(
contents,
file_format='wav',
samples_per_second=10000,
channel_count=2)
audio = audio_op.eval()
self.assertEqual(audio.shape, (0, 0))
def testSampleRatePlaceholder(self):
placeholder = array_ops.placeholder(dtypes.int32)
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000, 1,
samples_per_second_tensor=placeholder,
feed_dict={placeholder: 20000})
def testSampleRateBadType(self):
placeholder = array_ops.placeholder(dtypes.float32)
with self.assertRaises(TypeError):
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000.0, 1,
samples_per_second_tensor=placeholder,
feed_dict={placeholder: 20000.0})
def testSampleRateBadValue_Zero(self):
placeholder = array_ops.placeholder(dtypes.int32)
with six.assertRaisesRegex(self, Exception,
r'samples_per_second must be positive'):
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000.0, 1,
samples_per_second_tensor=placeholder,
feed_dict={placeholder: 0})
def testSampleRateBadValue_Negative(self):
placeholder = array_ops.placeholder(dtypes.int32)
with six.assertRaisesRegex(self, Exception,
r'samples_per_second must be positive'):
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000.0, 1,
samples_per_second_tensor=placeholder,
feed_dict={placeholder: -2})
def testInvalidFileFormat(self):
with six.assertRaisesRegex(self, Exception,
r'file_format must be one of'):
self._loadFileAndTest('mono_16khz.mp3', 'docx', 0.57, 20000, 1)
def testStaticShapeInference_ConstantChannelCount(self):
with self.test_session():
audio_op = ffmpeg.decode_audio(b'~~~ wave ~~~',
file_format='wav',
samples_per_second=44100,
channel_count=2)
self.assertEqual([None, 2], audio_op.shape.as_list())
def testStaticShapeInference_NonConstantChannelCount(self):
with self.test_session():
channel_count = array_ops.placeholder(dtypes.int32)
audio_op = ffmpeg.decode_audio(b'~~~ wave ~~~',
file_format='wav',
samples_per_second=44100,
channel_count=channel_count)
self.assertEqual([None, None], audio_op.shape.as_list())
def testStaticShapeInference_ZeroChannelCountInvalid(self):
with self.test_session():
with six.assertRaisesRegex(self, Exception,
r'channel_count must be positive'):
ffmpeg.decode_audio(b'~~~ wave ~~~',
file_format='wav',
samples_per_second=44100,
channel_count=0)
def testStaticShapeInference_NegativeChannelCountInvalid(self):
with self.test_session():
with six.assertRaisesRegex(self, Exception,
r'channel_count must be positive'):
ffmpeg.decode_audio(b'~~~ wave ~~~',
file_format='wav',
samples_per_second=44100,
channel_count=-2)
if __name__ == '__main__':
test.main()
|
mozilla/bedrock
|
refs/heads/master
|
tests/functional/test_home.py
|
4
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from pages.home import HomePage
@pytest.mark.skip_if_firefox(reason='Download button is displayed only to non-Firefox users')
@pytest.mark.sanity
@pytest.mark.smoke
@pytest.mark.nondestructive
@pytest.mark.parametrize('locale', ['en-US', 'de', 'fr'])
def test_download_button_is_displayed(locale, base_url, selenium):
page = HomePage(selenium, base_url, locale=locale).open()
assert page.is_primary_download_button_displayed
assert page.is_secondary_download_button_displayed
@pytest.mark.skip_if_not_firefox(reason='Firefox Accounts CTA is displayed only to Firefox users')
@pytest.mark.nondestructive
@pytest.mark.parametrize('locale', ['de', 'fr'])
def test_accounts_button_is_displayed_rest_tier_1(locale, base_url, selenium):
page = HomePage(selenium, base_url, locale=locale).open()
assert page.is_primary_accounts_button_displayed
assert page.is_secondary_accounts_button_displayed
assert not page.is_primary_download_button_displayed
assert not page.is_secondary_download_button_displayed
@pytest.mark.skip_if_not_firefox(reason='Alternative CTA is displayed only to Firefox users')
@pytest.mark.nondestructive
def test_accounts_button_is_displayed(base_url, selenium):
page = HomePage(selenium, base_url, locale='en-US').open()
assert page.is_primary_alt_button_displayed
assert page.is_secondary_accounts_button_displayed
assert not page.is_primary_download_button_displayed
assert not page.is_secondary_download_button_displayed
@pytest.mark.skip_if_firefox(reason='Download button is displayed only to non-Firefox users')
@pytest.mark.nondestructive
def test_legacy_download_button_is_displayed(base_url, selenium):
page = HomePage(selenium, base_url, locale='it').open()
assert page.is_primary_download_button_displayed
|
idncom/odoo
|
refs/heads/8.0
|
addons/account_cancel/__openerp__.py
|
261
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Cancel Journal Entries',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Accounting & Finance',
'description': """
Allows canceling accounting entries.
====================================
This module adds an 'Allow Canceling Entries' field to the form view of account journals.
If set to true, it allows users to cancel entries and invoices.
""",
'website': 'https://www.odoo.com/page/accounting',
    'depends': ['account'],
    'data': ['account_cancel_view.xml'],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
autosportlabs/kivy
|
refs/heads/master
|
examples/widgets/tabbed_panel_showcase.py
|
17
|
'''
TabbedPanel
============
A test of the TabbedPanel widget showing all of its capabilities.
'''
from kivy.app import App
from kivy.animation import Animation
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelHeader
from kivy.factory import Factory
class StandingHeader(TabbedPanelHeader):
pass
class CloseableHeader(TabbedPanelHeader):
pass
Factory.register('StandingHeader', cls=StandingHeader)
Factory.register('CloseableHeader', cls=CloseableHeader)
from kivy.lang import Builder
Builder.load_string('''
<TabShowcase>
but: _but
Button:
id: _but
text: 'Press to show Tabbed Panel'
on_release: root.show_tab()
<StandingHeader>
color: 0,0,0,0
disabled_color: self.color
Scatter:
do_translation: False
do_scale: False
do_rotation: False
auto_bring_to_front: False
rotation: 70
size_hint: None, None
size: lbl.size
center_x: root.center_x
center_y: root.center_y
Label:
id: lbl
text: root.text
size: root.size
color: 1, 1, 1, .5 if self.disabled else 1
pos: 0,0
<PanelLeft>
size_hint: (.45, .45)
pos_hint: {'center_x': .25, 'y': .55}
# replace the default tab with our custom tab class
default_tab_cls: sh.__class__
do_default_tab: True
default_tab_content: default_content.__self__
tab_width: 40
tab_height: 70
FloatLayout:
RstDocument:
id: default_content
text: '\\n'.join(("Standing tabs", "-------------",\
"Tabs in \\'%s\\' position" %root.tab_pos))
Image:
id: tab_2_content
source: 'data/images/defaulttheme-0.png'
Image:
id: tab_3_content
source: 'data/images/image-loading.gif'
StandingHeader:
id: sh
content: tab_2_content.__self__
text: 'tab 2'
StandingHeader:
content: tab_3_content
text: 'tab 3'
<CloseableHeader>
color: 0,0,0,0
disabled_color: self.color
# variable tab_width
text: 'tabx'
size_hint_x: None
width: self.texture_size[0] + 40
BoxLayout:
pos: root.pos
size_hint: None, None
size: root.size
padding: 3
Label:
id: lbl
text: root.text
BoxLayout:
size_hint: None, 1
orientation: 'vertical'
width: 22
Image:
source: 'tools/theming/defaulttheme/close.png'
on_touch_down:
if self.collide_point(*args[1].pos) :\
root.panel.remove_widget(root)
<PanelRight>
tab_pos: 'top_right'
size_hint: (.45, .45)
pos_hint: {'center_x': .75, 'y': .55}
# replace the default tab with our custom tab
default_tab: def_tab
# allow variable tab width
tab_width: None
FloatLayout:
RstDocument:
id: default_content
text: '\\n'.join(("Closeable tabs", "---------------",\
"- The tabs above are also scrollable",\
"- Tabs in \\'%s\\' position" %root.tab_pos))
Image:
id: tab_2_content
source: 'data/images/defaulttheme-0.png'
BoxLayout:
id: tab_3_content
BubbleButton:
text: 'Press to add new tab'
on_release: root.add_header()
BubbleButton:
            text: 'Press to set this tab as default'
on_release: root.default_tab = tab3
CloseableHeader:
id: def_tab
text: 'default tab'
content:default_content.__self__
panel: root
CloseableHeader:
text: 'tab2'
content: tab_2_content.__self__
panel: root
CloseableHeader:
id: tab3
text: 'tab3'
content: tab_3_content.__self__
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
<PanelbLeft>
tab_pos: 'bottom_left'
size_hint: (.45, .45)
pos_hint: {'center_x': .25, 'y': .02}
do_default_tab: False
TabbedPanelItem:
id: settings
text: 'Settings'
RstDocument:
text: '\\n'.join(("Normal tabs", "-------------",\
"Tabs in \\'%s\\' position" %root.tab_pos))
TabbedPanelItem:
text: 'tab2'
BubbleButton:
text: 'switch to settings'
on_press: root.switch_to(settings)
TabbedPanelItem:
text: 'tab3'
Image:
source: 'data/images/image-loading.gif'
<PanelbRight>
tab_pos: 'right_top'
size_hint: (.45, .45)
pos_hint: {'center_x': .75, 'y': .02}
default_tab: def_tab
tab_height: img.width
FloatLayout:
RstDocument:
id: default_content
text: '\\n'.join(("Image tabs","-------------",\
"1. Normal image tab","2. Image with Text","3. Rotated Image",\
"4. Tabs in \\'%s\\' position" %root.tab_pos))
Image:
id: tab_2_content
source: 'data/images/defaulttheme-0.png'
VideoPlayer:
id: tab_3_content
source: 'cityCC0.mpg'
TabbedPanelHeader:
id: def_tab
content:default_content.__self__
border: 0, 0, 0, 0
background_down: 'cityCC0.png'
background_normal:'sequenced_images/data/images/info.png'
TabbedPanelHeader:
id: tph
content: tab_2_content.__self__
BoxLayout:
pos: tph.pos
size: tph.size
orientation: 'vertical'
Image:
source: 'sequenced_images/data/images/info.png'\
if tph.state == 'normal' else 'cityCC0.png'
Label:
text: 'text & img'
TabbedPanelHeader:
id: my_header
content: tab_3_content.__self__
Scatter:
do_translation: False
do_scale: False
do_rotation: False
auto_bring_to_front: False
rotation: 90
size_hint: None, None
size: img.size
center: my_header.center
Image:
id: img
source: 'sequenced_images/data/images/info.png'\
if my_header.state == 'normal' else 'cityCC0.png'
size: my_header.size
allow_stretch: True
keep_ratio: False
''')
class Tp(TabbedPanel):
# override tab switching method to animate on tab switch
def switch_to(self, header):
anim = Animation(opacity=0, d=.24, t='in_out_quad')
def start_anim(_anim, child, in_complete, *lt):
_anim.start(child)
def _on_complete(*lt):
if header.content:
header.content.opacity = 0
anim = Animation(opacity=1, d=.43, t='in_out_quad')
start_anim(anim, header.content, True)
super(Tp, self).switch_to(header)
anim.bind(on_complete=_on_complete)
if self.current_tab.content:
start_anim(anim, self.current_tab.content, False)
else:
_on_complete()
class PanelLeft(Tp):
pass
class PanelRight(Tp):
def add_header(self):
self.add_widget(CloseableHeader(panel=self))
class PanelbLeft(Tp):
pass
class PanelbRight(Tp):
pass
class TabShowcase(FloatLayout):
def show_tab(self):
if not hasattr(self, 'tab'):
self.tab = tab = PanelLeft()
self.add_widget(tab)
self.tab1 = tab = PanelRight()
self.add_widget(tab)
self.tab2 = tab = PanelbRight()
self.add_widget(tab)
self.tab3 = tab = PanelbLeft()
self.add_widget(tab)
self.but.text = \
'Tabs in variable positions, press to change to top_left'
else:
values = ('left_top', 'left_mid', 'left_bottom', 'top_left',
'top_mid', 'top_right', 'right_top', 'right_mid',
'right_bottom', 'bottom_left', 'bottom_mid', 'bottom_right')
index = values.index(self.tab.tab_pos)
self.tab.tab_pos = self.tab1.tab_pos = self.tab2.tab_pos\
= self.tab3.tab_pos = values[(index + 1) % len(values)]
self.but.text = 'Tabs in \'%s\' position,' % self.tab.tab_pos\
+ '\n press to change to next pos'
class TestTabApp(App):
def build(self):
return TabShowcase()
if __name__ == '__main__':
TestTabApp().run()
|
rosmo/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ecs_service_facts.py
|
13
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_service_facts
short_description: list or describe services in ECS
description:
    - Lists or describes services in ECS.
version_added: "2.1"
author:
- "Mark Chance (@Java1Guy)"
- "Darek Kaczynski (@kaczynskid)"
requirements: [ json, botocore, boto3 ]
options:
details:
description:
- Set this to true if you want detailed information about the services.
required: false
default: 'false'
type: bool
events:
description:
- Whether to return ECS service events. Only has an effect if C(details) is true.
required: false
default: 'true'
type: bool
version_added: "2.6"
cluster:
description:
            - The cluster ARN in which to list the services.
required: false
default: 'default'
service:
description:
            - One or more services to get details for.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic listing example
- ecs_service_facts:
cluster: test-cluster
service: console-test-service
details: true
# Basic listing example
- ecs_service_facts:
cluster: test-cluster
'''
RETURN = '''
services:
description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
returned: success
type: complex
contains:
clusterArn:
            description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
returned: always
type: str
desiredCount:
description: The desired number of instantiations of the task definition to keep running on the service.
returned: always
type: int
loadBalancers:
description: A list of load balancer objects
returned: always
type: complex
contains:
loadBalancerName:
                    description: The name of the load balancer.
returned: always
type: str
containerName:
description: The name of the container to associate with the load balancer.
returned: always
type: str
containerPort:
description: The port on the container to associate with the load balancer.
returned: always
type: int
pendingCount:
description: The number of tasks in the cluster that are in the PENDING state.
returned: always
type: int
runningCount:
description: The number of tasks in the cluster that are in the RUNNING state.
returned: always
type: int
serviceArn:
description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service .
returned: always
type: str
serviceName:
description: A user-generated string used to identify the service
returned: always
type: str
status:
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
returned: always
type: str
taskDefinition:
description: The ARN of a task definition to use for tasks in the service.
returned: always
type: str
deployments:
description: list of service deployments
returned: always
type: list of complex
events:
description: list of service events
returned: when events is true
type: list of complex
''' # NOQA
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ec2_argument_spec, AWSRetry
class EcsServiceManager:
"""Handles ECS Services"""
def __init__(self, module):
self.module = module
self.ecs = module.client('ecs')
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_services_with_backoff(self, **kwargs):
paginator = self.ecs.get_paginator('list_services')
try:
return paginator.paginate(**kwargs).build_full_result()
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'ClusterNotFoundException':
self.module.fail_json_aws(e, "Could not find cluster to list services")
else:
raise
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def describe_services_with_backoff(self, **kwargs):
return self.ecs.describe_services(**kwargs)
def list_services(self, cluster):
fn_args = dict()
        if cluster:
fn_args['cluster'] = cluster
try:
response = self.list_services_with_backoff(**fn_args)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't list ECS services")
relevant_response = dict(services=response['serviceArns'])
return relevant_response
def describe_services(self, cluster, services):
fn_args = dict()
        if cluster:
fn_args['cluster'] = cluster
fn_args['services'] = services
try:
response = self.describe_services_with_backoff(**fn_args)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
self.module.fail_json_aws(e, msg="Couldn't describe ECS services")
running_services = [self.extract_service_from(service) for service in response.get('services', [])]
services_not_running = response.get('failures', [])
return running_services, services_not_running
def extract_service_from(self, service):
# some fields are datetime which is not JSON serializable
# make them strings
if 'deployments' in service:
for d in service['deployments']:
if 'createdAt' in d:
d['createdAt'] = str(d['createdAt'])
if 'updatedAt' in d:
d['updatedAt'] = str(d['updatedAt'])
if 'events' in service:
if not self.module.params['events']:
del service['events']
else:
for e in service['events']:
if 'createdAt' in e:
e['createdAt'] = str(e['createdAt'])
return service
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
""" https://stackoverflow.com/a/312464 """
for i in range(0, len(l), n):
yield l[i:i + n]
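# E.g. (illustrative): list(chunks(['a', 'b', 'c', 'd', 'e'], 2)) gives
# [['a', 'b'], ['c', 'd'], ['e']]. main() below chunks by 10 because the ECS
# DescribeServices API accepts at most 10 services per call.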
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
details=dict(type='bool', default=False),
events=dict(type='bool', default=True),
cluster=dict(),
service=dict(type='list')
))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
show_details = module.params.get('details')
task_mgr = EcsServiceManager(module)
if show_details:
if module.params['service']:
services = module.params['service']
else:
services = task_mgr.list_services(module.params['cluster'])['services']
ecs_facts = dict(services=[], services_not_running=[])
for chunk in chunks(services, 10):
running_services, services_not_running = task_mgr.describe_services(module.params['cluster'], chunk)
ecs_facts['services'].extend(running_services)
ecs_facts['services_not_running'].extend(services_not_running)
else:
ecs_facts = task_mgr.list_services(module.params['cluster'])
module.exit_json(changed=False, ansible_facts=ecs_facts, **ecs_facts)
if __name__ == '__main__':
main()
|
DepthDeluxe/ansible
|
refs/heads/devel
|
lib/ansible/parsing/splitter.py
|
59
|
# (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
import re
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_text
from ansible.parsing.quoting import unquote
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR * 8, _HEXCHAR * 4, _HEXCHAR * 2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
def parse_kv(args, check_raw=False):
'''
Convert a string of key/value items to a dict. If any free-form params
are found and the check_raw option is set to True, they will be added
to a new parameter called '_raw_params'. If check_raw is not enabled,
they will simply be ignored.
'''
args = to_text(args, nonstring='passthru')
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError as ve:
if 'no closing quotation' in str(ve).lower():
raise AnsibleParserError("error parsing argument string, try quoting the entire line.", orig_exc=ve)
else:
raise
raw_params = []
for orig_x in vargs:
x = _decode_escapes(orig_x)
if "=" in x:
pos = 0
try:
while True:
pos = x.index('=', pos + 1)
if pos > 0 and x[pos - 1] != '\\':
break
except ValueError:
# ran out of string, but we must have some escaped equals,
# so replace those and append this to the list of raw params
raw_params.append(x.replace('\\=', '='))
continue
k = x[:pos]
v = x[pos + 1:]
# FIXME: make the retrieval of this list of shell/command
# options a function, so the list is centralized
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
raw_params.append(orig_x)
else:
options[k.strip()] = unquote(v.strip())
else:
raw_params.append(orig_x)
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
if len(raw_params) > 0:
options[u'_raw_params'] = ' '.join(raw_params)
return options
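# E.g. (illustrative):
#     parse_kv('a=b c="foo bar"') -> {'a': 'b', 'c': 'foo bar'}
#     parse_kv('echo hi chdir=/tmp', check_raw=True)
#         -> {'chdir': '/tmp', '_raw_params': 'echo hi'}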
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx - 1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
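# E.g. (illustrative): _get_quote_state('say "hello', None) returns '"'
# because the double quote is still open, while _get_quote_state('"hi"', None)
# returns None.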
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
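# E.g. (illustrative): _count_jinja2_blocks('{{ foo', 0, '{{', '}}') returns 1;
# a later token containing the closing '}}' brings the depth back to 0, which
# is how split_args below detects that a block has been fully reassembled.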
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks, however this function is/will be used in the
core portions as well before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
Basically this is a variation shlex that has some more intelligence for
how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
# this is going to be the result value when we are done
params = []
    # initial split on newlines; each item is split again on spaces below
    args = args.strip()
    items = args.split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
    # e.g. if the tokens are "{{", "foo", "}}" they belong together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for (itemidx, item) in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.strip().split(' ')
line_continuation = False
for (idx, token) in enumerate(tokens):
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes and not(print_depth or block_depth or comment_depth):
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and was_inside_quotes:
params[-1] = "%s%s" % (params[-1], token)
elif len(tokens) > 1:
spacer = ''
if idx > 0:
spacer = ' '
params[-1] = "%s%s%s" % (params[-1], spacer, token)
else:
params[-1] = "%s\n%s" % (params[-1], token)
appended = True
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
# and may append the current token to the params (if we haven't previously done so)
prev_print_depth = print_depth
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
if print_depth != prev_print_depth and not appended:
params.append(token)
appended = True
prev_block_depth = block_depth
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
if block_depth != prev_block_depth and not appended:
params.append(token)
appended = True
prev_comment_depth = comment_depth
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
if comment_depth != prev_comment_depth and not appended:
params.append(token)
appended = True
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
# yet appended anything to the list of params, we do so now
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
params.append(token)
# if this was the last token in the list, and we have more than
# one item (meaning we split on newlines), add a newline back here
# to preserve the original structure
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
params[-1] += '\n'
# always clear the line continuation flag
line_continuation = False
# If we're done and things are not at zero depth or we're still inside quotes,
# raise an error to indicate that the args were unbalanced
if print_depth or block_depth or comment_depth or inside_quotes:
raise AnsibleParserError("failed at splitting arguments, either an unbalanced jinja2 block or quotes: {}".format(args))
return params
|
jdm/gemrb
|
refs/heads/master
|
gemrb/GUIScripts/demo/Start.py
|
5
|
import GemRB
def OnLoad():
GemRB.LoadGame(None)
# this is needed, so the game loop runs and the load happens
# before other code (eg. CreatePlayer) depending on it is run
GemRB.SetNextScript("SetupGame")
|
mkhoeini/zerorpc-python
|
refs/heads/master
|
zerorpc/patterns.py
|
24
|
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class ReqRep:
def process_call(self, context, bufchan, event, functor):
result = context.middleware_call_procedure(functor, *event.args)
bufchan.emit('OK', (result,), context.middleware_get_task_context())
def accept_answer(self, event):
return True
def process_answer(self, context, bufchan, event, method,
raise_remote_error):
result = event.args[0]
if event.name == 'ERR':
raise_remote_error(event)
bufchan.close()
return result
class ReqStream:
def process_call(self, context, bufchan, event, functor):
xheader = context.middleware_get_task_context()
for result in iter(context.middleware_call_procedure(functor,
*event.args)):
bufchan.emit('STREAM', result, xheader)
bufchan.emit('STREAM_DONE', None, xheader)
def accept_answer(self, event):
return event.name in ('STREAM', 'STREAM_DONE')
def process_answer(self, context, bufchan, event, method,
raise_remote_error):
def is_stream_done(event):
return event.name == 'STREAM_DONE'
bufchan.on_close_if = is_stream_done
def iterator(event):
while event.name == 'STREAM':
yield event.args
event = bufchan.recv()
if event.name == 'ERR':
raise_remote_error(event)
bufchan.close()
return iterator(event)
patterns_list = [ReqStream(), ReqRep()]
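# Hypothetical illustration (not part of zerorpc's public API) of why
# ReqStream precedes ReqRep in patterns_list: patterns are tried in order,
# ReqStream only claims STREAM/STREAM_DONE events, and ReqRep accepts
# everything, so the catch-all must come last.
def _select_pattern_sketch(event):
    for pattern in patterns_list:
        if pattern.accept_answer(event):
            return pattern
    raise RuntimeError('no pattern accepted event %r' % event.name)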
|
ryfeus/lambda-packs
|
refs/heads/master
|
pytorch/source/caffe2/python/layers/concat.py
|
1
|
## @package concat
# Module caffe2.python.layers.concat
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from future.utils import viewitems
import numpy as np
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
def get_concatenated_feature_to_index(blobs_to_concat):
concat_feature_to_index = defaultdict(list)
start_pos = 0
for scalar in blobs_to_concat:
num_dims = scalar.dtype.shape[0]
if hasattr(scalar, 'metadata') \
and hasattr(scalar.metadata, 'feature_specs') \
and hasattr(scalar.metadata.feature_specs, 'feature_to_index') \
and isinstance(scalar.metadata.feature_specs.feature_to_index, dict): # noqa B950
for k, v in scalar.metadata.feature_specs.feature_to_index.items():
concat_feature_to_index[k].extend([start_pos + vi for vi in v])
start_pos += num_dims
return dict(concat_feature_to_index) if concat_feature_to_index.keys() else None
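# Illustrative example of the offset bookkeeping above (hypothetical scalars
# carrying only the attributes this function reads): given blob A with 2 dims
# and feature_to_index {'f': [0, 1]}, and blob B with 3 dims and
# feature_to_index {'g': [0]}, the result is {'f': [0, 1], 'g': [2]},
# because B's indices are shifted by A's width (start_pos == 2).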
class Concat(ModelLayer):
"""
Construct Concat layer
Assume that first dimension is batch,
Example:
embedding_dim = 64
input_record = self.new_record(schema.Struct(
('input1', schema.Scalar((np.float32, (embedding_dim, )))),
('input2', schema.Scalar((np.float32, (embedding_dim, )))),
('input3', schema.Scalar((np.float32, (embedding_dim, )))),
))
output = self.model.Concat(input_record)
self.assertEqual(
schema.Scalar((np.float32, ((len(input_record.fields) * embedding_dim, )))),
output
)
# Note that in the Concat layer we assume the first dimension is batch,
# so the input is B * embedding_dim.
# add_axis=1 makes it B * 1 * embedding_dim;
# Concat on axis=1 makes it B * N * embedding_dim.
output = self.model.Concat(input_record, axis=1, add_axis=1)
self.assertEqual(
schema.Scalar((np.float32, ((len(input_record.fields), embedding_dim)))),
output
)
"""
def __init__(self, model, input_record, axis=1, add_axis=0,
name='concat', **kwargs):
super(Concat, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
self.add_axis = add_axis
assert not (axis == 0 and add_axis == 1), \
"It's not allowed to add axis=0"
assert isinstance(input_record, schema.Struct),\
    "Incorrect input type. Expected Struct, but received: {0}".\
    format(input_record)
shapes = []
for field_name, field_type in viewitems(input_record.fields):
assert isinstance(field_type, schema.Scalar),\
    "Incorrect input type for {}. Expected Scalar, but got: {}".\
    format(field_name, field_type)
# Assume that first dimension is batch, so actual axis in shape is
# axis - 1
shape = list(field_type.field_type().shape)
if add_axis:
shape.insert(axis - 1, 1)
assert len(shape) >= axis,\
"Concat expects that limited dimensions of the input tensor"
shapes.append(shape)
logger.info('Concat Layer input shapes: ' + str(shapes))
if axis == 0:
self.output_schema = schema.from_blob_list(
input_record[0],
[self.get_next_blob_reference('output')]
)
return
concat_dim = 0
for shape in shapes:
concat_dim += shape[axis - 1]
shape[axis - 1] = 0
assert shape == shapes[0],\
"Shapes {0} and {1} are not compatible for Concat".\
format(shape, shapes[0])
output_dims = shapes[0]
output_dims[axis - 1] = concat_dim
logger.info('Concat Layer output_dims: ' + str(output_dims))
self.output_schema = schema.Scalar(
(np.float32, output_dims),
self.get_next_blob_reference('output'))
record_to_concat = input_record.fields.values()
concated_feature_to_index = get_concatenated_feature_to_index(
record_to_concat
)
if concated_feature_to_index:
metadata = schema.Metadata(
feature_specs=schema.FeatureSpec(
feature_to_index=concated_feature_to_index
)
)
self.output_schema.set_metadata(metadata)
def add_ops(self, net):
net.Concat(
self.input_record.field_blobs(),
[
self.output_schema.field_blobs()[0],
self.output_schema.field_blobs()[0] + "_concat_dims"
],
axis=self.axis,
add_axis=self.add_axis,
)
|
kenshinthebattosai/readthedocs.org
|
refs/heads/master
|
readthedocs/core/management/commands/archive.py
|
25
|
from glob import glob
import os
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
from django.template import loader as template_loader
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """Custom management command to build an archive index page of all
    projects and their built versions on the site. Invoked via
    ``./manage.py archive``.
    """
def handle(self, *args, **options):
doc_index = {}
os.chdir(settings.DOCROOT)
for directory in glob("*"):
doc_index[directory] = []
path = os.path.join(directory, 'rtd-builds')
for version in glob(os.path.join(path, "*")):
v = version.replace(path + '/', '')
doc_index[directory].append(v)
context = {
'doc_index': doc_index,
'MEDIA_URL': settings.MEDIA_URL,
}
html = template_loader.get_template('archive/index.html').render(context)
print html
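# Illustrative shape of the context rendered above (project and version
# names are assumptions): doc_index maps each project directory to the
# versions found under its rtd-builds path, e.g.
# {'myproject': ['latest', 'v1.0'], 'other': ['latest']}.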
|
MobinRanjbar/hue
|
refs/heads/master
|
apps/search/src/search/migrations/0001_initial.py
|
39
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Facet'
db.create_table('search_facet', (
('data', self.gf('django.db.models.fields.TextField')()),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('search', ['Facet'])
# Adding model 'Result'
db.create_table('search_result', (
('data', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('search', ['Result'])
# Adding model 'Sorting'
db.create_table('search_sorting', (
('data', self.gf('django.db.models.fields.TextField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('search', ['Sorting'])
# Adding model 'Core'
db.create_table('search_core', (
('sorting', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['search.Sorting'])),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
('facets', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['search.Facet'])),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
('label', self.gf('django.db.models.fields.CharField')(max_length=100)),
('result', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['search.Result'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('properties', self.gf('django.db.models.fields.TextField')(default='[]')),
))
db.send_create_signal('search', ['Core'])
def backwards(self, orm):
# Deleting model 'Facet'
db.delete_table('search_facet')
# Deleting model 'Result'
db.delete_table('search_result')
# Deleting model 'Sorting'
db.delete_table('search_sorting')
# Deleting model 'Core'
db.delete_table('search_core')
models = {
'search.core': {
'Meta': {'object_name': 'Core'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'facets': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Facet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Result']"}),
'sorting': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Sorting']"})
},
'search.facet': {
'Meta': {'object_name': 'Facet'},
'data': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'search.result': {
'Meta': {'object_name': 'Result'},
'data': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'search.sorting': {
'Meta': {'object_name': 'Sorting'},
'data': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['search']
|
nharraud/b2share
|
refs/heads/master
|
invenio/legacy/external_authentication/openid.py
|
8
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This module contains functions and methods to authenticate with OpenID
providers.
"""
__revision__ = \
"$Id$"
from invenio.config import CFG_SITE_SECURE_URL
from invenio.legacy.external_authentication import ExternalAuth
from invenio.legacy.websession.session import get_session
class ExternalOpenID(ExternalAuth):
"""
Contains methods to authenticate with an OpenID provider.
"""
@staticmethod
def __init_req(req):
req.g['openid_provider_name'] = ''
req.g['openid_debug'] = 0
req.g['openid_msg'] = ''
req.g['openid_debug_msg'] = ''
req.g['openid_response'] = None
def auth_user(self, username, password, req=None):
"""
Tries to find email and OpenID identity of the user. If it
doesn't find any of them, returns (None, None)
@param username: Isn't used in this function
@type username: str
@param password: Isn't used in this function
@type password: str
@param req: request
@type req: invenio.legacy.wsgi.SimulatedModPythonRequest
@rtype: str|NoneType, str|NoneType
"""
from openid.consumer import consumer
self._get_response(req)
response = req.g['openid_response']
identity = None
email = None
if response.status == consumer.SUCCESS:
# On the user's first login, fetch his/her email
# from the OpenID provider.
email = self._get_email_from_success_response(req)
identity = response.getDisplayIdentifier()
elif response.status == consumer.CANCEL:
# If user cancels the verification, set corresponding message.
req.g['openid_msg'] = 21
elif response.status == consumer.FAILURE:
# If verification fails, set corresponding message.
req.g['openid_msg'] = 22
return email, identity
@staticmethod
def get_msg(req):
return req.g['openid_msg']
def fetch_user_nickname(self, username, password=None, req=None):
"""
Fetches the nickname of the user from the OpenID provider. If it
doesn't find any, returns None.
This function doesn't need username, password or req. They exist
just because this class is derived from ExternalAuth.
@param username: Isn't used in this function
@type username: str
@param password: Isn't used in this function
@type password: str
@param req: request
@type req: invenio.legacy.wsgi.SimulatedModPythonRequest
@rtype: str|NoneType
"""
from openid.extensions import ax
from openid.extensions import sreg
nickname = None
# May be either Simple Registration (sreg) response or
# Attribute Exchange (ax) response.
sreg_resp = None
ax_resp = None
response = req.g['openid_response']
sreg_resp = sreg.SRegResponse.fromSuccessResponse(response)
if sreg_resp:
if 'nickname' in sreg_resp.getExtensionArgs():
nickname = sreg_resp.getExtensionArgs()['nickname']
ax_resp = ax.FetchResponse.fromSuccessResponse(response)
if ax_resp and not nickname:
extensions = ax_resp.getExtensionArgs()
if 'type.ext0' in extensions and \
'value.ext0.1' in extensions:
if extensions['type.ext0'] == \
'http://axschema.org/namePerson/friendly':
nickname = extensions['value.ext0.1']
if 'type.ext1' in extensions and \
'value.ext1.1' in extensions and not nickname:
if extensions['type.ext1'] == \
'http://axschema.org/namePerson/friendly':
nickname = extensions['value.ext1.1']
return nickname
@staticmethod
def _get_email_from_success_response(req):
"""
Fetches the email from consumer.SuccessResponse. If it doesn't find
any, returns None.
@rtype: str|NoneType
"""
from openid.extensions import ax
email = None
response = req.g['openid_response']
ax_resp = ax.FetchResponse.fromSuccessResponse(response)
if ax_resp:
extensions = ax_resp.getExtensionArgs()
if 'type.ext0' in extensions and \
'value.ext0.1' in extensions:
if extensions['type.ext0'] == \
'http://axschema.org/contact/email':
email = extensions['value.ext0.1']
if 'type.ext1' in extensions and \
'value.ext1.1' in extensions and not email:
if extensions['type.ext1'] == \
'http://axschema.org/contact/email':
email = extensions['value.ext1.1']
return email
@staticmethod
def _get_response(req):
"""
Constructs the response returned from the OpenID provider
@param req: request
@type req: invenio.legacy.wsgi.SimulatedModPythonRequest
"""
from invenio.ext.legacy.handler import wash_urlargd
from openid.consumer import consumer
content = {}
for key in req.form.keys():
content[key] = (str, '')
args = wash_urlargd(req.form, content)
if 'ln' in args:
del args['ln']
if 'referer' in args:
if not args['referer']:
del args['referer']
oidconsumer = consumer.Consumer({"id": get_session(req)}, None)
url = CFG_SITE_SECURE_URL + "/youraccount/login"
req.g['openid_provider_name'] = args['provider']
req.g['openid_response'] = oidconsumer.complete(args, url)
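# A minimal sketch (hypothetical helper, not part of this class) of the AX
# lookup pattern used by fetch_user_nickname() and
# _get_email_from_success_response(): scan the numbered type.extN keys for a
# schema URI and return the matching value.extN.1 entry.
def _ax_lookup_sketch(extensions, schema_uri):
    for n in (0, 1):
        if extensions.get('type.ext%d' % n) == schema_uri:
            return extensions.get('value.ext%d.1' % n)
    return None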
|
leighpauls/k2cro4
|
refs/heads/master
|
third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
|
1
|
# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for png.py."""
import unittest
from png import PNGChecker
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost_mock import MockSystemHost
class MockSCMDetector(object):
def __init__(self, scm, prop=None):
self._scm = scm
self._prop = prop
def display_name(self):
return self._scm
def propget(self, pname, path):
return self._prop
class PNGCheckerTest(unittest.TestCase):
"""Tests PNGChecker class."""
def test_init(self):
"""Test __init__() method."""
def mock_handle_style_error(self):
pass
checker = PNGChecker("test/config", mock_handle_style_error, MockSCMDetector('git'), MockSystemHost())
self.assertEqual(checker._file_path, "test/config")
self.assertEqual(checker._handle_style_error, mock_handle_style_error)
def test_check(self):
errors = []
def mock_handle_style_error(line_number, category, confidence, message):
error = (line_number, category, confidence, message)
errors.append(error)
file_path = ''
fs = MockFileSystem()
scm = MockSCMDetector('svn')
checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(filesystem=fs))
checker.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0],
(0, 'image/png', 5, 'Set the svn:mime-type property (svn propset svn:mime-type image/png ).'))
files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
fs = MockFileSystem(files)
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 0)
files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes'}
fs = MockFileSystem(files)
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 1)
files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n#enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
fs = MockFileSystem(files)
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 0)
files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes\nenable-auto-props = yes\n*.png = svn:mime-type=image/png'}
fs = MockFileSystem(files)
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 0)
files = {'/Users/mock/.subversion/config': 'enable-auto-props = no'}
fs = MockFileSystem(files)
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 1)
file_path = "foo.png"
fs.write_binary_file(file_path, "Dummy binary data")
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 1)
file_path = "foo-expected.png"
fs.write_binary_file(file_path, "Dummy binary data")
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0], (0, 'image/png', 5, 'Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.'))
if __name__ == '__main__':
unittest.main()
|
akionakamura/scikit-learn
|
refs/heads/master
|
sklearn/covariance/tests/test_graph_lasso.py
|
272
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places;
# these values are for the sklearn version.
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
msduketown/SublimeKodi
|
refs/heads/master
|
libs-mac/PIL/ExifTags.py
|
71
|
#
# The Python Imaging Library.
# $Id$
#
# EXIF tags
#
# Copyright (c) 2003 by Secret Labs AB
#
# See the README file for information on usage and redistribution.
#
##
# This module provides constants and clear-text names for various
# well-known EXIF tags.
##
##
# Maps EXIF tags to tag names.
TAGS = {
# possibly incomplete
0x00fe: "NewSubfileType",
0x00ff: "SubfileType",
0x0100: "ImageWidth",
0x0101: "ImageLength",
0x0102: "BitsPerSample",
0x0103: "Compression",
0x0106: "PhotometricInterpretation",
0x0107: "Threshholding",
0x0108: "CellWidth",
0x0109: "CellLength",
0x010a: "FillOrder",
0x010d: "DocumentName",
0x011d: "PageName",
0x010e: "ImageDescription",
0x010f: "Make",
0x0110: "Model",
0x0111: "StripOffsets",
0x0112: "Orientation",
0x0115: "SamplesPerPixel",
0x0116: "RowsPerStrip",
0x0117: "StripByteCounts",
0x0118: "MinSampleValue",
0x0119: "MaxSampleValue",
0x011a: "XResolution",
0x011b: "YResolution",
0x011c: "PlanarConfiguration",
0x0120: "FreeOffsets",
0x0121: "FreeByteCounts",
0x0122: "GrayResponseUnit",
0x0123: "GrayResponseCurve",
0x0128: "ResolutionUnit",
0x012d: "TransferFunction",
0x0131: "Software",
0x0132: "DateTime",
0x013b: "Artist",
0x013c: "HostComputer",
0x013e: "WhitePoint",
0x013f: "PrimaryChromaticities",
0x0140: "ColorMap",
0x0152: "ExtraSamples",
0x0201: "JpegIFOffset",
0x0202: "JpegIFByteCount",
0x0211: "YCbCrCoefficients",
0x0212: "YCbCrSubSampling",
0x0213: "YCbCrPositioning",
0x0214: "ReferenceBlackWhite",
0x1000: "RelatedImageFileFormat",
0x1001: "RelatedImageWidth",
0x1002: "RelatedImageLength",
0x828d: "CFARepeatPatternDim",
0x828e: "CFAPattern",
0x828f: "BatteryLevel",
0x8298: "Copyright",
0x829a: "ExposureTime",
0x829d: "FNumber",
0x8769: "ExifOffset",
0x8773: "InterColorProfile",
0x8822: "ExposureProgram",
0x8824: "SpectralSensitivity",
0x8825: "GPSInfo",
0x8827: "ISOSpeedRatings",
0x8828: "OECF",
0x8829: "Interlace",
0x882a: "TimeZoneOffset",
0x882b: "SelfTimerMode",
0x9000: "ExifVersion",
0x9003: "DateTimeOriginal",
0x9004: "DateTimeDigitized",
0x9101: "ComponentsConfiguration",
0x9102: "CompressedBitsPerPixel",
0x9201: "ShutterSpeedValue",
0x9202: "ApertureValue",
0x9203: "BrightnessValue",
0x9204: "ExposureBiasValue",
0x9205: "MaxApertureValue",
0x9206: "SubjectDistance",
0x9207: "MeteringMode",
0x9208: "LightSource",
0x9209: "Flash",
0x920a: "FocalLength",
0x920b: "FlashEnergy",
0x920c: "SpatialFrequencyResponse",
0x920d: "Noise",
0x9211: "ImageNumber",
0x9212: "SecurityClassification",
0x9213: "ImageHistory",
0x9214: "SubjectLocation",
0x9215: "ExposureIndex",
0x9216: "TIFF/EPStandardID",
0x927c: "MakerNote",
0x9286: "UserComment",
0x9290: "SubsecTime",
0x9291: "SubsecTimeOriginal",
0x9292: "SubsecTimeDigitized",
0xa000: "FlashPixVersion",
0xa001: "ColorSpace",
0xa002: "ExifImageWidth",
0xa003: "ExifImageHeight",
0xa004: "RelatedSoundFile",
0xa005: "ExifInteroperabilityOffset",
0xa20b: "FlashEnergy",
0xa20c: "SpatialFrequencyResponse",
0xa20e: "FocalPlaneXResolution",
0xa20f: "FocalPlaneYResolution",
0xa210: "FocalPlaneResolutionUnit",
0xa214: "SubjectLocation",
0xa215: "ExposureIndex",
0xa217: "SensingMethod",
0xa300: "FileSource",
0xa301: "SceneType",
0xa302: "CFAPattern",
0xa401: "CustomRendered",
0xa402: "ExposureMode",
0xa403: "WhiteBalance",
0xa404: "DigitalZoomRatio",
0xa405: "FocalLengthIn35mmFilm",
0xa406: "SceneCaptureType",
0xa407: "GainControl",
0xa408: "Contrast",
0xa409: "Saturation",
0xa40a: "Sharpness",
0xa40b: "DeviceSettingDescription",
0xa40c: "SubjectDistanceRange",
0xa420: "ImageUniqueID",
0xa430: "CameraOwnerName",
0xa431: "BodySerialNumber",
0xa432: "LensSpecification",
0xa433: "LensMake",
0xa434: "LensModel",
0xa435: "LensSerialNumber",
0xa500: "Gamma",
}
##
# Maps EXIF GPS tags to tag names.
GPSTAGS = {
0: "GPSVersionID",
1: "GPSLatitudeRef",
2: "GPSLatitude",
3: "GPSLongitudeRef",
4: "GPSLongitude",
5: "GPSAltitudeRef",
6: "GPSAltitude",
7: "GPSTimeStamp",
8: "GPSSatellites",
9: "GPSStatus",
10: "GPSMeasureMode",
11: "GPSDOP",
12: "GPSSpeedRef",
13: "GPSSpeed",
14: "GPSTrackRef",
15: "GPSTrack",
16: "GPSImgDirectionRef",
17: "GPSImgDirection",
18: "GPSMapDatum",
19: "GPSDestLatitudeRef",
20: "GPSDestLatitude",
21: "GPSDestLongitudeRef",
22: "GPSDestLongitude",
23: "GPSDestBearingRef",
24: "GPSDestBearing",
25: "GPSDestDistanceRef",
26: "GPSDestDistance",
27: "GPSProcessingMethod",
28: "GPSAreaInformation",
29: "GPSDateStamp",
30: "GPSDifferential",
31: "GPSHPositioningError",
}
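##
# Typical usage sketch (an assumption, not part of this module): translate
# the numeric keys returned by Image._getexif() into readable names.
#
#   from PIL import Image
#   from PIL.ExifTags import TAGS
#   exif = Image.open("photo.jpg")._getexif() or {}
#   readable = dict((TAGS.get(k, k), v) for k, v in exif.items())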
|
chand3040/sree_odoo
|
refs/heads/master
|
openerp/addons/report/tests/test_reports.py
|
385
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
import openerp.tests
_logger = logging.getLogger(__name__)
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(True)
class TestReports(openerp.tests.TransactionCase):
def test_reports(self):
registry, cr, uid = self.registry, self.cr, self.uid
r_model = registry('ir.actions.report.xml')
domain = [('report_type', 'like', 'qweb')]
for r in r_model.browse(cr, uid, r_model.search(cr, uid, domain)):
report_model = 'report.%s' % r.report_name
try:
registry(report_model)
except KeyError:
# Only test the generic reports here
_logger.info("testing report %s", r.report_name)
report_model = registry(r.model)
report_model_ids = report_model.search(cr, uid, [], limit=10)
if not report_model_ids:
    _logger.info("no record found, skipping report %s", r.report_name)
    continue
if not r.multi:
report_model_ids = report_model_ids[:1]
# Test report generation
registry('report').get_html(cr, uid, report_model_ids, r.report_name)
else:
continue
|
dvliman/jaikuengine
|
refs/heads/master
|
common/management/commands/clean.py
|
35
|
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand
import build
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option(
'--skip-zip', action='store_true', dest='skip_zip', default=False,
help='Do not clean up zip files'
),
)
help = 'Cleans up the results of a build'
args = ''
requires_model_validation = False
def handle(self, *test_labels, **options):
skip_zip = options.get('skip_zip', False)
build.clean(skip_zip=skip_zip)
|
sapetnioc/brainvisa-maker
|
refs/heads/master
|
python/bv_build/workers/__init__.py
|
1
|
class RequirementWorker(object):
@staticmethod
def init_check_requirements(dir_manager, verbose):
pass
@staticmethod
def check_module_requirement(dir_manager, module, verbose, **kwargs):
raise NotImplementedError()
@staticmethod
def resolve_requirement(dir_manager, verbose, **kwargs):
raise NotImplementedError()
@staticmethod
def missing_requirements_error_message(dir_manager, missing_requirements):
raise NotImplementedError()
class SourceWorker(object):
pass
class BuildWorker(object):
pass
worker_class_by_type = dict(
requirement=RequirementWorker,
source=SourceWorker,
build=BuildWorker,
)
def get_worker(worker_name, worker_type):
m = __import__('bv_build.workers.%s' % worker_name, fromlist=[''], level=0)
worker_class = worker_class_by_type[worker_type]
# Return one of the classes defined in the module. There should be
# exactly one matching class.
return (i for i in m.__dict__.itervalues() if isinstance(i,type) and
issubclass(i, worker_class) and i.__module__ == m.__name__).next()
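# Hypothetical usage sketch (the worker module name 'apt' is an assumption):
#
#   worker_cls = get_worker('apt', 'requirement')
#   worker_cls.check_module_requirement(dir_manager, module, verbose=True)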
|
alessiodm/cloudbrain
|
refs/heads/master
|
cloudbrain/publishers/PikaPublisher.py
|
7
|
import json
import pika
from cloudbrain.publishers.PublisherInterface import Publisher
class PikaPublisher(Publisher):
"""
Publisher implementation for RabbitMQ via the Pika client
"""
def __init__(self, device_name, device_id, rabbitmq_address, metric_name):
super(PikaPublisher, self).__init__(device_name, device_id, rabbitmq_address)
self.connection = None
self.channel = None
self.metric_name = metric_name
def publish(self, buffer_content):
key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
self.channel.basic_publish(exchange=key,
routing_key=key,
body=json.dumps(buffer_content),
properties=pika.BasicProperties(
delivery_mode=2, # this makes the message persistent
))
def connect(self):
credentials = pika.PlainCredentials('cloudbrain', 'cloudbrain')
self.connection = pika.BlockingConnection(pika.ConnectionParameters(
host=self.host, credentials=credentials))
self.channel = self.connection.channel()
key = "%s:%s:%s" % (self.device_id, self.device_name, self.metric_name)
self.channel.exchange_declare(exchange=key,
type='direct')
def disconnect(self):
self.connection.close()
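# Hypothetical usage sketch (device names, host and payload are assumptions):
#
#   pub = PikaPublisher('openbci', 'device-01', 'localhost', 'eeg')
#   pub.connect()
#   pub.publish([{'timestamp': 0, 'channel_0': 0.42}])
#   pub.disconnect()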
|
nexiles/odoo
|
refs/heads/8.0
|
addons/account/installer.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from dateutil.relativedelta import relativedelta
import logging
from operator import itemgetter
import time
import urllib2
import urlparse
try:
import simplejson as json
except ImportError:
import json # noqa
from openerp.release import serie
from openerp.tools.translate import _
from openerp.osv import fields, osv
_logger = logging.getLogger(__name__)
class account_installer(osv.osv_memory):
_name = 'account.installer'
_inherit = 'res.config.installer'
def _get_charts(self, cr, uid, context=None):
modules = self.pool.get('ir.module.module')
# try to get the list from the apps server
try:
apps_server = self.pool.get('ir.module.module').get_apps_server(cr, uid, context=context)
up = urlparse.urlparse(apps_server)
url = '{0.scheme}://{0.netloc}/apps/charts?serie={1}'.format(up, serie)
j = urllib2.urlopen(url, timeout=3).read()
apps_charts = json.loads(j)
charts = dict(apps_charts)
except Exception:
charts = dict()
# Looking for the module with the 'Account Charts' category
category_name, category_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'module_category_localization_account_charts')
ids = modules.search(cr, uid, [('category_id', '=', category_id)], context=context)
if ids:
charts.update((m.name, m.shortdesc) for m in modules.browse(cr, uid, ids, context=context))
charts = sorted(charts.items(), key=itemgetter(1))
charts.insert(0, ('configurable', _('Custom')))
return charts
_columns = {
# Accounting
'charts': fields.selection(_get_charts, 'Accounting Package',
required=True,
help="Installs localized accounting charts to match as closely as "
"possible the accounting needs of your company based on your "
"country."),
'date_start': fields.date('Start Date', required=True),
'date_stop': fields.date('End Date', required=True),
'period': fields.selection([('month', 'Monthly'), ('3months', '3 Monthly')], 'Periods', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'has_default_company': fields.boolean('Has Default Company', readonly=True),
}
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id and user.company_id.id or False
def _default_has_default_company(self, cr, uid, context=None):
count = self.pool.get('res.company').search_count(cr, uid, [], context=context)
return bool(count == 1)
_defaults = {
'date_start': lambda *a: time.strftime('%Y-01-01'),
'date_stop': lambda *a: time.strftime('%Y-12-31'),
'period': 'month',
'company_id': _default_company,
'has_default_company': _default_has_default_company,
'charts': 'configurable'
}
def get_unconfigured_cmp(self, cr, uid, context=None):
    """ get the list of companies that have not been configured yet,
    ignoring the demo chart of accounts """
company_ids = self.pool.get('res.company').search(cr, uid, [], context=context)
cr.execute("SELECT company_id FROM account_account WHERE active = 't' AND account_account.parent_id IS NULL AND name != %s", ("Chart For Automated Tests",))
configured_cmp = [r[0] for r in cr.fetchall()]
return list(set(company_ids)-set(configured_cmp))
def check_unconfigured_cmp(self, cr, uid, context=None):
""" check if there are still unconfigured companies """
if not self.get_unconfigured_cmp(cr, uid, context=context):
raise osv.except_osv(_('No Unconfigured Company!'), _("There is currently no company without chart of account. The wizard will therefore not be executed."))
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None: context = {}
res = super(account_installer, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
cmp_select = []
# display in the widget selection only the companies that haven't been configured yet
unconfigured_cmp = self.get_unconfigured_cmp(cr, uid, context=context)
for field in res['fields']:
if field == 'company_id':
res['fields'][field]['domain'] = [('id', 'in', unconfigured_cmp)]
res['fields'][field]['selection'] = [('', '')]
if unconfigured_cmp:
cmp_select = [(line.id, line.name) for line in self.pool.get('res.company').browse(cr, uid, unconfigured_cmp)]
res['fields'][field]['selection'] = cmp_select
return res
def on_change_start_date(self, cr, uid, id, start_date=False):
if start_date:
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = (start_date + relativedelta(months=12)) - relativedelta(days=1)
return {'value': {'date_stop': end_date.strftime('%Y-%m-%d')}}
return {}
def execute(self, cr, uid, ids, context=None):
self.execute_simple(cr, uid, ids, context)
return super(account_installer, self).execute(cr, uid, ids, context=context)
def execute_simple(self, cr, uid, ids, context=None):
if context is None:
context = {}
fy_obj = self.pool.get('account.fiscalyear')
for res in self.read(cr, uid, ids, context=context):
if 'date_start' in res and 'date_stop' in res:
f_ids = fy_obj.search(cr, uid, [('date_start', '<=', res['date_start']), ('date_stop', '>=', res['date_stop']), ('company_id', '=', res['company_id'][0])], context=context)
if not f_ids:
name = code = res['date_start'][:4]
if int(name) != int(res['date_stop'][:4]):
name = res['date_start'][:4] + '-' + res['date_stop'][:4]
code = res['date_start'][2:4] + '-' + res['date_stop'][2:4]
vals = {
'name': name,
'code': code,
'date_start': res['date_start'],
'date_stop': res['date_stop'],
'company_id': res['company_id'][0]
}
fiscal_id = fy_obj.create(cr, uid, vals, context=context)
if res['period'] == 'month':
fy_obj.create_period(cr, uid, [fiscal_id])
elif res['period'] == '3months':
fy_obj.create_period3(cr, uid, [fiscal_id])
def modules_to_install(self, cr, uid, ids, context=None):
modules = super(account_installer, self).modules_to_install(
cr, uid, ids, context=context)
chart = self.read(cr, uid, ids, ['charts'],
context=context)[0]['charts']
_logger.debug('Installing chart of accounts %s', chart)
return (modules | set([chart])) - set(['has_default_company', 'configurable'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
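# Illustrative note on on_change_start_date() and execute_simple() above
# (the dates are assumptions): a start date of 2014-07-01 yields an end date
# of 2015-06-30, and since the years differ the fiscal year is created with
# name "2014-2015" and code "14-15".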
|
MycChiu/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/segment_reduction_ops_test.py
|
15
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SegmentReductionHelper(test.TestCase):
def _input(self, input_shape, dtype=dtypes_lib.int32):
num_elem = 1
for x in input_shape:
num_elem *= x
values = np.arange(1, num_elem + 1)
np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
return constant_op.constant(
values, shape=input_shape, dtype=dtype), np_values
def _segmentReduce(self, indices, x, op1, op2=None, num_out_rows=None):
if not x.size:
return np.array([])
indices = np.asarray(indices)
if num_out_rows is None:
num_out_rows = indices[-1] + 1
output = [None] * num_out_rows
slice_shape = x.shape[indices.ndim:]
x_flat = x.reshape((indices.size,) + slice_shape)
for i, index in enumerate(indices.ravel()):
if (output[index] is not None) and op1 == np.max:
for j in range(0, output[index].shape[0]):
output[index][j] = op1([output[index][j], x_flat[i][j]])
elif output[index] is not None:
output[index] = op1(output[index], x_flat[i])
else:
output[index] = x_flat[i]
# Zero-initialize values that are still uncalculated; for max, use the
# minimum of the input dtype as the identity element instead of zero.
if not op1 == np.max:
    output = [o if o is not None else np.zeros(slice_shape) for o in output]
else:
    zeroslice = np.zeros(slice_shape)
    fill_value = (np.iinfo(x.dtype).min if np.issubdtype(x.dtype, np.integer)
                  else np.finfo(x.dtype).min)
    zeroslice.fill(fill_value)
    output = [o if o is not None else zeroslice for o in output]
if op2 is not None:
output = [op2(o) for o in output]
output = [o.reshape(slice_shape) for o in output]
return np.array(output)
def _mean_cum_op(self, x, y):
return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
def _mean_reduce_op(self, x):
return x[0] / x[1] if isinstance(x, tuple) else x
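# Illustrative example of the reference reduction above (inputs are
# assumptions): indices [0, 0, 1] with x = [[1, 2], [3, 4], [5, 6]] and
# op1=np.add yield [[4, 6], [5, 6]]: rows that share a segment id are folded
# together with op1.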
class SegmentReductionOpTest(SegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.segment_sum), (self._mean_cum_op,
self._mean_reduce_op,
math_ops.segment_mean),
(np.ndarray.__mul__, None, math_ops.segment_prod),
(np.minimum, None, math_ops.segment_min),
(np.maximum, None, math_ops.segment_max)]
# A subset of ops has been enabled for complex numbers
complex_ops_list = [(np.add, None, math_ops.segment_sum),
(np.ndarray.__mul__, None, math_ops.segment_prod)]
n = 10
shape = [n, 2]
indices = [i // 3 for i in range(n)]
for dtype in dtypes:
if dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
curr_ops_list = complex_ops_list
else:
curr_ops_list = ops_list
with self.test_session(use_gpu=False):
tf_x, np_x = self._input(shape, dtype=dtype)
for np_op1, np_op2, tf_op in curr_ops_list:
np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
s = tf_op(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
# `tf_ans.shape` can only infer that sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsShape(self):
shape = [4, 4]
tf_x, _ = self._input(shape)
indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])
with self.assertRaises(ValueError):
math_ops.segment_sum(data=tf_x, segment_ids=indices)
def testSegmentIdsSize(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment_ids should be the same size"):
s.eval()
def testSegmentIdsValid(self):
# This is a baseline for the following SegmentIdsInvalid* tests.
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 0, 1]
result = math_ops.segment_sum(data=tf_x, segment_ids=indices).eval()
self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
def testSegmentIdsGreaterThanZero(self):
shape = [4, 4]
with self.test_session():
tf_x, np_x = self._input(shape)
indices = [1, 1, 2, 2]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsHole(self):
shape = [4, 4]
with self.test_session():
tf_x, np_x = self._input(shape)
indices = [0, 0, 3, 3]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsInvalid1(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [-1, -1, 0, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted."):
s.eval()
def testSegmentIdsInvalid2(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing"):
s.eval()
def testSegmentIdsInvalid3(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 2, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly "
"because 'segment_ids' input is not sorted."):
s.eval()
def testSegmentIdsInvalid4(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 0, -1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testSegmentIdsInvalid5(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 0, 0, -2]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testGradient(self):
shape = [4, 4]
indices = [0, 1, 2, 2]
for tf_op in [
math_ops.segment_sum, math_ops.segment_mean, math_ops.segment_min,
math_ops.segment_max
]:
with self.test_session():
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, segment_ids=indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
class UnsortedSegmentSumTest(SegmentReductionHelper):
use_gpu = False
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
]
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in dtypes:
with self.test_session(use_gpu=self.use_gpu):
tf_x, np_x = self._input(shape, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_out_rows=num_segments)
s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
def testGradientSegmentSum(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
with self.test_session(use_gpu=self.use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testGradientMatchesSegmentSum(self):
# Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
# and compare the outputs, which should be identical.
# NB: for this test to work, indices must be valid for SegmentSum, namely
# it must be sorted, the indices must be contiguous, and num_segments
# must be max(indices) + 1.
indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
n = len(indices)
num_cols = 2
shape = [n, num_cols]
num_segments = max(indices) + 1
with self.test_session(use_gpu=self.use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
# Results from UnsortedSegmentSum
unsorted_s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
(unsorted_jacob_t, unsorted_jacob_n) = gradient_checker.compute_gradient(
tf_x,
shape,
unsorted_s, [num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
# Results from SegmentSum
sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
sorted_s, [num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3)
self.assertAllClose(unsorted_jacob_n, sorted_jacob_n, rtol=1e-3, atol=1e-3)
def testBadIndices(self):
# Note: GPU kernel does not return the out-of-range error needed for this
# test, so this test is marked as cpu-only.
with self.test_session(use_gpu=False):
for bad in [[-1]], [[7]]:
unsorted = math_ops.unsorted_segment_sum([[17]], bad, num_segments=2)
with self.assertRaisesOpError(
r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
unsorted.eval()
def testEmptySecondDimension(self):
dtypes = [
np.float32, np.float64, np.int64, np.int32, np.complex64, np.complex128
]
with self.test_session(use_gpu=self.use_gpu):
for dtype in dtypes:
for itype in (np.int32, np.int64):
data = np.zeros((2, 0), dtype=dtype)
segment_ids = np.array([0, 1], dtype=itype)
unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)
self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
def testGradientSegmentMax(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
with self.test_session():
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = math_ops.unsorted_segment_max(data=tf_x, segment_ids=indices,
num_segments=num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s,
[num_segments, num_cols],
x_init_value=np_x.astype(np.double), delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
class UnsortedSegmentSumGpuTest(UnsortedSegmentSumTest):
use_gpu = True
class SparseSegmentReductionHelper(SegmentReductionHelper):
def _sparse_input(self, input_shape, num_indices, dtype=dtypes_lib.int32):
a, b = super(SparseSegmentReductionHelper, self)._input(input_shape, dtype)
indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)
return (constant_op.constant(
indices, dtype=dtypes_lib.int32), indices, a, b)
def _sparseSegmentReduce(self, x, indices, segment_indices, op1, op2=None):
return self._segmentReduce(segment_indices, x[indices], op1, op2)
class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32
]
mean_dtypes = [dtypes_lib.float32, dtypes_lib.float64]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.sparse_segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.sparse_segment_mean)]
n = 400
shape = [n, 2]
segment_indices = []
for i in range(20):
for _ in range(i + 1):
segment_indices.append(i)
num_indices = len(segment_indices)
for dtype in dtypes:
with self.test_session(use_gpu=False):
tf_indices, np_indices, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtype)
for np_op1, np_op2, tf_op in ops_list:
if tf_op == math_ops.sparse_segment_mean and dtype not in mean_dtypes:
continue
np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
# `tf_ans.shape` can only infer that sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsHole(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsGreaterThanZero(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [1, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testValid(self):
# Baseline for the test*Invalid* methods below.
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
s.eval()
def testIndicesInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, -1, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[1\] == -1 out of range \[0, 10\)"):
s.eval()
def testIndicesInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[3\] == 10 out of range \[0, 10\)"):
s.eval()
def testSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 0, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids are not increasing"):
s.eval()
def testSegmentsInvalid3(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted"):
s.eval()
def testSegmentsInvalid4(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 2\), possibly because "
"'segment_ids' input is not sorted"):
s.eval()
def testSegmentsInvalid6(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testSegmentsInvalid7(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testGradient(self):
shape = [10, 4]
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]:
with self.test_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testGradientValid(self):
# Baseline for the testGradient*Invalid* methods below.
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
s.eval()
def testGradientIndicesInvalid1(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index 10 out of range \[0, 10\)"):
s.eval()
def testGradientIndicesInvalid2(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, -1, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index -1 out of range \[0, 10\)"):
s.eval()
def testGradientSegmentsInvalid1(self):
tf_x, _ = self._input(
[3, 4], dtype=dtypes_lib.float32) # expecting 3 segments
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 1, 1] # 2 segments
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError("Invalid number of segments"):
s.eval()
def testGradientSegmentsInvalid2(self):
tf_x, _ = self._input([1, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 1 out of range \[0, 1\)"):
s.eval()
def testGradientSegmentsInvalid3(self):
tf_x, _ = self._input([2, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id -1 out of range \[0, 2\)"):
s.eval()
def testGradientSegmentsInvalid4(self):
tf_x, _ = self._input([0, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, -1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 0 out of range \[0, 0\)"):
s.eval()
if __name__ == "__main__":
test.main()
|
guillaume-havard/pong
|
refs/heads/master
|
src/gui.py
|
1
|
#! /usr/bin/python3
__author__ = 'Guillaume Havard'
import pygame
def print_text(surface, text, text_pos, text_color=pygame.Color(255, 255, 255)):
"""
Print text on a surface.
:param surface: surface to blit the text onto
:param text: text to print
:param text_pos: tuple with the top-left position
:param text_color: color of the text
"""
font_obj = pygame.font.Font("freesansbold.ttf", 32)
msg_surface_obj = font_obj.render(text, False, text_color)
msg_rect_obj = msg_surface_obj.get_rect()
msg_rect_obj.topleft = text_pos
surface.blit(msg_surface_obj, msg_rect_obj)
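# A minimal usage sketch (hypothetical window size and text; assumes pygame
# has been initialised and a display surface exists):
#
#   pygame.init()
#   screen = pygame.display.set_mode((320, 240))
#   print_text(screen, "Hello", (10, 10))
#   pygame.display.flip()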
|
GiantSteps/essentia
|
refs/heads/master
|
src/python/essentia/translate.py
|
10
|
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import inspect, types
import streaming
import _essentia
import common
from streaming import _reloadStreamingAlgorithms
# genetic marker used to track which composite parameters configure which inner algorithms
class MarkerObject(object):
def __init__(self, default_value=None):
self.default_value = default_value
edt_parameter_code = { common.Edt.STRING: 'String',
common.Edt.INTEGER: 'Int',
common.Edt.VECTOR_REAL: 'VectorReal',
common.Edt.VECTOR_VECTOR_REAL: 'VectorVectorReal',
common.Edt.REAL: 'Real',
common.Edt.BOOL: 'Bool'}
edt_cpp_code = { common.Edt.STRING: 'string',
common.Edt.INTEGER: 'int',
common.Edt.VECTOR_INTEGER: 'vector<int>',
common.Edt.VECTOR_STRING: 'vector<string>',
common.Edt.VECTOR_REAL: 'vector<Real>',
common.Edt.VECTOR_VECTOR_REAL: 'vector<vector<Real> >',
common.Edt.REAL: 'Real',
common.Edt.BOOL: 'bool'}
# html escape codes used in edt_dot_code:
# &lt; --> less than or <
# &gt; --> greater than or >
# &#58; --> colon or :
edt_dot_code = { common.Edt.STRING: 'string',
common.Edt.INTEGER: 'int',
common.Edt.VECTOR_INTEGER: 'vector<int>',
common.Edt.VECTOR_STRING: 'vector<string>',
common.Edt.VECTOR_REAL: 'vector<Real>',
common.Edt.VECTOR_VECTOR_REAL: 'vector<vector<Real> >',
common.Edt.REAL: 'Real',
common.Edt.BOOL: 'bool',
common.Edt.STEREOSAMPLE: 'StereoSample',
common.Edt.MATRIX_REAL: 'TNT::Array2D<Real>'} # &#58; -> colon
# finds the EDT of a parameter of an algorithm given the name of the parameter, the marker_obj used
# to configure that parameter, and the configure_log
def find_edt(composite_param_name, marker_obj, configure_log):
# find inner algorithm and inner parameter name that this composite_param will configure
for inner_algo_name, properties in configure_log.iteritems():
for inner_param_name, value in properties['parameters'].iteritems():
if marker_obj == value:
return properties['instance'].paramType(inner_param_name)
raise RuntimeError('Could not determine parameter type of composite algorithm\'s \''+composite_param_name+'\' parameter')
# given a reference to an inner algorithm and the configure_log, returns the name of the algo (use
# lower() if referring to the member var name)
def inner_algo_name(instance, configure_log):
for algo_name, properties in configure_log.iteritems():
if instance == properties['instance']:
return algo_name
raise RuntimeError('Could not find the name of the inner algorithm')
def generate_dot_algo(algo_name, algo_inst):
'''declares an algorithm in dot'''
dot_code = '_'+algo_name.lower()+' [shape="box", style="rounded,filled", fillcolor="grey50", color="transparent", \n'
indent = ' '*len('_'+algo_name)
dot_code += indent+' label=<\n'
indent += ' '*len('label=<')
dot_code += generate_dot_algo_label(algo_inst, indent)
dot_code += '>]\n\n'
return dot_code
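# For instance, generate_dot_algo('FrameCutter_0', algo_inst) emits roughly:
#
#   _framecutter_0 [shape="box", style="rounded,filled", fillcolor="grey50", color="transparent",
#                   label=<
#                   ...HTML-like table with inputs, name, parameters and outputs...
#                   >]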
def generate_dot_algo_label(algo_inst, indent=''):
### each label of a node consists of algo inputs, algo name,
### configuration parameters and algo outputs
dot_code = indent+'<table border="0"><tr>\n'+\
indent+' <td><table border="0" bgcolor="white">\n'
# add inputs:
for name in algo_inst.inputNames():
typestr = edt_dot_code[ algo_inst.getInputType(name) ]
dot_code += indent+' <tr><td port="'+name+'_i">'+name+'<br/>['+typestr+']</td></tr>\n'
dot_code += indent+' </table></td>\n\n'+\
indent+' <td><table border="0">\n'
# add algo name:
dot_code += indent+' <tr><td valign="top" colspan="2"><font color="white" point-size="18">'+algo_inst.name()+'</font></td></tr>\n'
# add parameters:
for name in algo_inst.parameterNames():
value = algo_inst.paramValue(name)
dot_code += indent+' <tr>\n'+\
indent+' <td border="0" valign="top" align="right"><font color="white">'+name+'</font></td>\n'+\
indent+' <td border="0" valign="top" align="left"><font color="white">'+str(value)+'</font></td>\n'+\
indent+' </tr>\n'
dot_code += indent+' </table></td>\n\n'+\
indent+' <td><table border="0" bgcolor="white">\n'
# add outputs:
for name in algo_inst.outputNames():
typestr = edt_dot_code[ algo_inst.getOutputType(name) ]
dot_code += indent+' <tr><td port="'+name+'_o">'+name+'<br/>['+typestr+']</td></tr>\n'
dot_code += indent+' </table></td>\n'+\
indent+'</tr></table>'
return dot_code
def generate_dot_network(configure_log, composite_algo_inst):
# make connections
dot_code ='\n// connecting the network\n'
for algo_name, properties in configure_log.iteritems():
for left_connector, right_connectors in properties['instance'].connections.iteritems():
for right_connector in right_connectors:
if isinstance(right_connector, streaming._StreamConnector):
dot_code += ' _'+inner_algo_name(left_connector.output_algo, configure_log).lower()+':'+left_connector.name+'_o:e'+' -> '+\
'_'+inner_algo_name(right_connector.input_algo, configure_log).lower()+':'+right_connector.name + '_i:w;\n'
if isinstance(right_connector, types.NoneType):
inneralgoname = inner_algo_name(left_connector.output_algo, configure_log).lower()
dot_code += ' nowhere_'+inneralgoname+' [shape="box", style="rounded,filled", fillcolor="grey50", color="transparent" label="Nowhere" fontcolor="white" fontsize="18"];\n'+\
' _'+inneralgoname+':'+left_connector.name+'_o:e'+' -> nowhere_'+inneralgoname+';\n'
# make connections from floating inputs
for name, connector in composite_algo_inst.inputs.iteritems():
innerinputname = connector.name
inneralgoname = inner_algo_name(connector.input_algo, configure_log).lower()
dot_code += ' '+name+':e -> _'+inneralgoname+':'+innerinputname+'_i:w;\n'
# make connections from floating outputs
for name, connector in composite_algo_inst.outputs.iteritems():
inneroutputname = connector.name
inneralgoname = inner_algo_name(connector.output_algo, configure_log).lower()
dot_code += ' _'+inneralgoname+':'+inneroutputname+'_o:e -> '+name+':w;\n'
return dot_code
def generate_dot_cluster(configure_log, clustername, composite_algo_inst):
''' creates a cluster in dot language surrounded by a dashed, lightgrey line'''
dot_code = ' subgraph cluster_0 {\n'\
' color=lightgrey;\n'\
' style=dashed;\n'\
' label='+clustername+';\n\n'
# for each algo in the cluster, declare it in dot:
for algo_name, properties in configure_log.iteritems():
dot_code += generate_dot_algo(algo_name, properties['instance'])
# create the connections
dot_code += generate_dot_network(configure_log, composite_algo_inst)
# close the cluster code
dot_code += ' }\n'
return dot_code
def translate(composite_algo, output_filename, dot_graph=False):
'''Takes in a class that is derived from essentia.streaming.CompositeBase and an output-filename
and writes output-filename.h and output-filename.cpp versions of the given class.'''
if not inspect.isclass(composite_algo):
raise TypeError('"composite_algo" argument must be a class')
if not streaming.CompositeBase in inspect.getmro(composite_algo):
raise TypeError('"composite_algo" argument must inherit from essentia.streaming.CompositeBase')
param_names, _, _, default_values = inspect.getargspec(composite_algo.__init__)
param_names.remove('self')
# these marker objects are used to track where config params travel in the network
marker_objs = {}
if not default_values and param_names: # python vars have no type so we cannot know what type they are!!
raise TypeError('"composite_algo" arguments must have default values')
if param_names:
for param_name, value in zip(param_names, default_values):
marker_objs[param_name] = MarkerObject(value)
### Before we call their function we need to neuter all of the configure methods of each
### streaming algorithm so that our markers won't cause the configure method to vomit
configure_log = {}
def dummy_configure(self, **kwargs):
lbl = 0
algo_name = self.name()+'_'+str(lbl)
# increment lbl to generate a unique name for inner algo
lowered_algo_names = [name.lower() for name in configure_log.keys()]
while algo_name.lower() in lowered_algo_names:
algo_name = algo_name[:algo_name.index('_')+1] + str(lbl)
lbl +=1
# algo_name is now unique
configure_log[algo_name] = {}
configure_log[algo_name]['instance'] = self
configure_log[algo_name]['parameters'] = kwargs
# We need to actually call the internal configure method because algorithms like silencerate
# need to be configured so we can use their outputs. However, we can't use our marker objects,
# so we remove the marker objects that don't have a default value associated with them, and
# for those that do have a default value, we use that value instead of the MarkerObject
# itself
kwargs_no_markers = dict(kwargs)
for key, value in kwargs.iteritems():
if value in marker_objs.values():
if value.default_value is None:
del kwargs_no_markers[key]
else:
kwargs_no_markers[key] = value.default_value
self.real_configure(**kwargs_no_markers)
# iterate over all streaming_algos
streaming_algos = inspect.getmembers( streaming,
lambda obj: inspect.isclass(obj) and \
_essentia.StreamingAlgorithm in inspect.getmro(obj) )
streaming_algos = [member[1] for member in streaming_algos]
for algo in streaming_algos:
algo.real_configure = algo.configure
algo.configure = dummy_configure
### Now generate an instance of their composite algorithm ###
algo_inst = composite_algo(**marker_objs)
# overwrite the dummy configure with the real configure method, so
# translate can be called several times in the same file for a different
# compositebase without entering in an infinite loop
for algo in streaming_algos:
algo.configure = algo.real_configure
### Do some checking on their network ###
for algo in [ logitem['instance'] for logitem in configure_log.values() ]:
if isinstance(algo, streaming.VectorInput):
raise TypeError('essentia.streaming.VectorInput algorithms are not allowed for translatable composite algorithms')
if isinstance(algo, streaming.AudioLoader) or \
isinstance(algo, streaming.EasyLoader) or \
isinstance(algo, streaming.MonoLoader) or \
isinstance(algo, streaming.EqloudLoader):
raise TypeError('No type of AudioLoader is allowed for translatable composite algorithms')
if isinstance(algo, streaming.AudioWriter) or \
isinstance(algo, streaming.MonoWriter):
raise TypeError('No type of AudioWriter is allowed for translatable composite algorithms')
if isinstance(algo, streaming.FileOutput):
raise TypeError('essentia.streaming.FileOutput algorithms are not allowed for translatable composite algorithms')
def sort_by_key(configure_log):
# sort algorithms and conf values:
sitems = configure_log.items()
sitems.sort()
sorted_algos = []
sorted_params= []
for k,v in sitems:
sorted_params.append(v)
sorted_algos.append(k)
return sorted_algos, sorted_params
sorted_algos, sorted_params = sort_by_key(configure_log)
### generate .h code ###
h_code = '''// Generated automatically by essentia::translate
#ifndef STREAMING_''' + composite_algo.__name__.upper() + '''
#define STREAMING_''' + composite_algo.__name__.upper()+ '''
#include "streamingalgorithmcomposite.h"
class '''+composite_algo.__name__+''' : public essentia::streaming::AlgorithmComposite {
protected:
'''
for algo_name in sorted_algos:
h_code += ' essentia::streaming::Algorithm* _'+algo_name.lower()+';\n'
h_code += '''
public:
'''+composite_algo.__name__+'''();
~'''+composite_algo.__name__+'''() {
'''
for algo_name in sorted_algos:
h_code += ' delete _'+algo_name.lower()+';\n'
h_code += ''' }
void declareParameters() {
'''
if param_names:
for param_name, default_value in zip(param_names, default_values):
h_code += ' declareParameter("'+param_name+'", "", "", '
if isinstance(default_value, basestring): h_code += '"'+default_value+'"'
else: h_code += str(default_value)
h_code += ');\n'
h_code += ''' }
void configure();
void createInnerNetwork();
void reset();
static const char* name;
static const char* version;
static const char* description;
};
#endif
'''
### Generate .cpp code ###
cpp_code = '''// Generated automatically by essentia::translate
#include "'''+output_filename+'''.h"
#include "algorithmfactory.h"
#include "taskqueue.h"
using namespace std;
using namespace essentia;
using namespace essentia::streaming;
const char* '''+composite_algo.__name__+'''::name = "'''+composite_algo.__name__+'''";
const char* '''+composite_algo.__name__+'''::version = "1.0";
const char* '''+composite_algo.__name__+'''::description = DOC("");\n\n'''
################################
# CONSTRUCTOR
################################
cpp_code += composite_algo.__name__+'''::'''+composite_algo.__name__+'''(): '''
for algo_name in sorted_algos: cpp_code += '_' + algo_name.lower() + '(0), '
cpp_code = cpp_code[:-2] + ''' {
setName("''' + composite_algo.__name__ + '''");
declareParameters();
AlgorithmFactory& factory = AlgorithmFactory::instance();\n\n'''
# create inner algorithms
for algo_name in sorted_algos:
cpp_code += ' _'+algo_name.lower()+' = factory.create("'+algo_name[:algo_name.rindex('_')]+'");\n'
cpp_code+='}\n\n'
################################
# INNER NETWORK
################################
# declaration of inputs and output and connecting the network should not be
# done in the constructor, as there are algos like silencerate whose
# inputs/outputs depend on the configuration parameters. Hence, it is safer to
# do it in the configure() function
cpp_code += 'void ' + composite_algo.__name__ + '::createInnerNetwork() {\n'
# declare inputs
for input_alias, connector in algo_inst.inputs.iteritems():
input_owner_name = None
input_name = None
for algo_name, properties in zip(sorted_algos, sorted_params): #configure_log.iteritems():
if properties['instance'] == connector.input_algo:
input_owner_name = algo_name
input_name = connector.name
break
if not input_owner_name:
raise RuntimeError('Could not determine owner of the \''+input_alias+'\' input')
cpp_code += ' declareInput(_'+input_owner_name.lower()+'->input("'+input_name+'"), "'+input_alias+'", "");\n'
cpp_code += '\n'
# declare outputs
aliases, connectors = sort_by_key(algo_inst.outputs)
for output_alias, connector in zip(aliases, connectors):
output_owner_name = None
output_name = None
for algo_name, properties in zip(sorted_algos, sorted_params): #configure_log.iteritems():
if properties['instance'] == connector.output_algo:
output_owner_name = algo_name
output_name = connector.name
break
if not output_owner_name:
raise RuntimeError('Could not determine owner of the \''+output_alias+'\' output')
cpp_code += ' declareOutput(_'+output_owner_name.lower()+'->output("'+output_name+'"), "'+output_alias+'", "");\n'
cpp_code += '\n'
# make connections
for algo_name, properties in zip(sorted_algos, sorted_params): #configure_log.iteritems():
for left_connector, right_connectors in properties['instance'].connections.iteritems():
for right_connector in right_connectors:
if isinstance(right_connector, streaming._StreamConnector):
cpp_code += ' connect( _'+\
inner_algo_name(left_connector.output_algo, configure_log).lower() + \
'->output("'+left_connector.name+'"), _' + \
inner_algo_name(right_connector.input_algo, configure_log).lower() + \
'->input("'+right_connector.name+'") );\n'
elif isinstance(right_connector, types.NoneType):
cpp_code += ' connect( _'+\
inner_algo_name(left_connector.output_algo, configure_log).lower() + \
'->output("'+left_connector.name+'"), NOWHERE );\n'
cpp_code = cpp_code[:-1]
cpp_code += '''
}\n\n'''
################################
# CONFIGURE
################################
cpp_code += 'void '+composite_algo.__name__+'::configure() {\n'
# configure method
# create local variable for every composite parameter
for composite_param_name in param_names:
param_edt = find_edt(composite_param_name, marker_objs[composite_param_name], configure_log)
cpp_code += ' '+edt_cpp_code[param_edt]+' '+composite_param_name + \
' = parameter("'+composite_param_name+'").to' + \
edt_parameter_code[param_edt]+'();\n'
cpp_code += '\n'
# configure inner algorithms
for algo_name, properties in zip(sorted_algos, sorted_params): #configure_log.iteritems():
# skip if inner algorithm wasn't configured explicitly
if not properties['parameters']: continue
for param_name, value in properties['parameters'].iteritems():
type = common.determineEdt(value)
if 'LIST' in str(type) or 'VECTOR' in str(type):
if type in [common.Edt.VECTOR_STRING]:
cpp_code += ' const char* ' + param_name + '[] = {'
for s in value: cpp_code += '\"' + s + '\"' + ','
elif type in[common.Edt.VECTOR_REAL, common.Edt.LIST_REAL]:
cpp_code += ' Real ' + param_name + '[] = {'
for f in value: cpp_code += str(f) + ','
elif type in [common.Edt.VECTOR_INT, common.Edt.LIST_INT]:
cpp_code += ' int ' + param_name + '[] = {'
for i in value: cpp_code += str(i) + ','
cpp_code = cpp_code[:-1]+'};\n'
cpp_code += ' _'+algo_name.lower()+'->configure('
for param_name, value in properties['parameters'].iteritems():
if isinstance(value, MarkerObject):
# figure out which composite param it is
composite_param_name = None
for marker_name, marker_obj in marker_objs.iteritems():
if marker_obj == value:
composite_param_name = marker_name
break
if not composite_param_name:
raise RuntimeError('Could not determine which composite parameter to use to configure inner algorithm \''+algo_name+'\'s parameter \''+param_name+'\'')
cpp_code += '"'+param_name+'", '+composite_param_name+', '
else:
type = common.determineEdt(value)
if 'LIST' in str(type) or 'VECTOR' in str(type):
if type in [common.Edt.VECTOR_STRING]:
cpp_code += '"'+param_name+'", '+'arrayToVector<string>(' + param_name + ') '
elif type in[common.Edt.VECTOR_REAL, common.Edt.LIST_REAL]:
cpp_code += '"'+param_name+'", '+'arrayToVector<Real>(' + param_name + ') '
elif type in [common.Edt.VECTOR_INT, common.Edt.LIST_INT]:
cpp_code += '"'+param_name+'", '+'arrayToVector<int>(' + param_name + ') '
elif isinstance(value, basestring):
cpp_code += '"'+param_name+'", "'+value+'", '
elif isinstance(value, bool):
if value: cpp_code += '"'+param_name+'", true, '
else: cpp_code += '"'+param_name+'", false, '
else:
cpp_code += '"'+param_name+'", '+str(value)+', '
cpp_code = cpp_code[:-2] + ');\n'
cpp_code += ' createInnerNetwork();\n}\n\n'
################################
# RESET
################################
cpp_code += 'void '+composite_algo.__name__+'::reset() {\n'
for algo_name in sorted_algos:
cpp_code += ' _' + algo_name.lower() + '->reset();\n'
cpp_code += '}\n\n'
################################
# DESTRUCTOR
################################
# see h_code. Each algo from the composite is deleted separately instead of
# calling deleteNetwork
# cpp_code += composite_algo.__name__+'''::~'''+composite_algo.__name__+'''() {
# deleteNetwork(_''' + input_owner_name.lower() + ''');
#}'''
# cpp_code +='\n'
#
################################
# end of cpp code
################################
if dot_graph:
### generate .dot code ###
dot_code = 'digraph ' + output_filename +' {\n'
dot_code += ' rankdir=LR\n' # left-to-right layout instead of the default top-down
# general formatting options:
dot_code += ' node [color=black, fontname=Verdana, weight=1, fontsize=8, shape=Mrecord]\n'
dot_code += ' edge [color=black, style=solid, weight=1, arrowhead="dotnormal", arrowtail="dot", arrowsize=1, fontsize=6]\n'
# for each input generate nodes
for name in algo_inst.inputs.keys():
dot_code += ' '+name+' [label="'+name+'"];\n'
dot_code += generate_dot_cluster(configure_log, composite_algo.__name__, algo_inst)
# for each output generate nodes
for name in algo_inst.outputs.keys():
dot_code += ' '+name+' [label="'+name+'"];\n'
dot_code += '}'
### Write files ###
f = open(output_filename+'.h', 'w')
f.write(h_code)
f.close()
f = open(output_filename+'.cpp', 'w')
f.write(cpp_code)
f.close()
if dot_graph:
f = open(output_filename+'.dot', 'w')
f.write(dot_code)
f.close()
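# A minimal usage sketch (the composite class and file name below are
# hypothetical, not part of this module). Any class deriving from
# essentia.streaming.CompositeBase whose __init__ arguments all carry default
# values can be translated:
#
#   from essentia.streaming import CompositeBase
#
#   class MyComposite(CompositeBase):
#       def __init__(self, frameSize=1024):
#           super(MyComposite, self).__init__()
#           # ... instantiate and connect inner streaming algorithms here ...
#
#   translate(MyComposite, 'MyComposite', dot_graph=True)
#   # writes MyComposite.h, MyComposite.cpp and MyComposite.dot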
|
ychen820/microblog
|
refs/heads/master
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/db/models/sql/datastructures.py
|
118
|
"""
Useful auxiliary data structures for query construction. Not useful outside
the SQL domain.
"""
class EmptyResultSet(Exception):
pass
class MultiJoin(Exception):
"""
Used by join construction code to indicate the point at which a
multi-valued join was attempted (if the caller wants to treat that
exceptionally).
"""
def __init__(self, level):
self.level = level
class Empty(object):
pass
class RawValue(object):
def __init__(self, value):
self.value = value
class Date(object):
"""
Add a date selection column.
"""
def __init__(self, col, lookup_type):
self.col = col
self.lookup_type = lookup_type
def relabel_aliases(self, change_map):
c = self.col
if isinstance(c, (list, tuple)):
self.col = (change_map.get(c[0], c[0]), c[1])
def as_sql(self, qn, connection):
if isinstance(self.col, (list, tuple)):
col = '%s.%s' % tuple([qn(c) for c in self.col])
else:
col = self.col
return connection.ops.date_trunc_sql(self.lookup_type, col)
|
dessHub/bc-14-online-store-application
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/__init__.py
|
456
|
from __future__ import absolute_import, division, unicode_literals
from .py import Trie as PyTrie
Trie = PyTrie
# pylint:disable=wrong-import-position
try:
from .datrie import Trie as DATrie
except ImportError:
pass
else:
Trie = DATrie
# pylint:enable=wrong-import-position
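# A minimal usage sketch: whichever implementation is selected, the Trie maps
# unicode strings to values and supports longest-prefix queries.
#
#   trie = Trie({u"foo": 1, u"foobar": 2})
#   assert trie[u"foo"] == 1
#   assert trie.longest_prefix(u"foobaz") == u"foo"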
|
Thomas-Tsai/od2ckan
|
refs/heads/master
|
map2ckan.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import organization_map
class mapod2ckan():
def __init__(self):
self.package={'extras':[], 'tag':[], 'resources':[], 'org':{'extras':[]}}
self.license_id='1'
self.license_url='http'
def map_package_params(self, key, value):
self.package[key] = value
def map_tag_params(self, key, value):
for keyword in value:
testkeyword = keyword.encode('utf-8')
if testkeyword.isalpha():
testkeyword = testkeyword.lower()
testkeyword = testkeyword.replace(" ", "_")
tagdata={'name':testkeyword.lower()}
self.package['tag'].append(tagdata)
#else:
#map_package_extras(k, v)
def map_organization_params(self, key, value):
if key == 'organization':
org = organization_map.organization_name()
owner_org = org.search(value.encode('utf8'))
self.package['owner_org'] = owner_org
self.package['org']['name'] = owner_org
self.package['org']['title'] = value.encode('utf-8')
else:
org_extra={}
org_extra['key'] = key.encode('utf-8')
org_extra['value'] = value.encode('utf-8')
self.package['org']['extras'].append(org_extra)
def map_package_extras(self, key, value):
if key == 'notes':
key="extra note"
if type(value) is int:
data=value
else:
data = value.encode('utf-8')
extra={}
extra['key']=key.encode('utf-8')
extra['value']=data
self.package['extras'].append(extra)
def map_resources_params(self, key, value):
for data in value:
resource={'resourceid':'', 'resourcedescription':'', 'format':'', 'resourcemodified':'', 'extras':{}}
for rk, rv in data.items():
if rk == 'resourceID':
resource['resourceid'] = rv
elif rk == 'resourceDescription':
resource['resourcedescription'] = rv
elif rk == 'format':
resource['format'] = rv
elif rk == 'resourceModified':
resource['resourcemodified'] = rv
else:
resource['extras'][rk] = rv
self.package['resources'].append(resource)
def map(self, data):
rs = data
for k,v in rs.items():
if k == 'title':
self.map_package_params('title', v)
elif k == 'identifier':
self.map_package_params('name', v)
elif k == 'description':
self.map_package_params('notes', v)
elif k == 'type':
self.map_package_params('type', v)
elif k == 'publisher':
self.map_package_params('owner_org', v)
elif k == 'modified':
self.map_package_params('last_modified', v)
elif k == 'license':
self.map_package_params('license_id', self.license_id)
elif k == 'license_URL':
self.map_package_params('license_url', self.license_url)
elif k == 'organization':
self.map_organization_params(k, v)
elif k == 'organizationContactName':
self.map_organization_params(k, v)
elif k == 'organizationContactPhone':
self.map_organization_params(k, v)
elif k == 'organizationContactEmail':
self.map_organization_params(k, v)
elif k == 'contactName':
self.map_package_params('author', v)
elif k == 'publisherContactName':
self.map_package_params('author', v)
elif k == 'contactEmail':
self.map_package_params('author_email', v)
elif k == 'publisherContactEmail':
self.map_package_params('author_email', v)
elif k == 'landingPage':
self.map_organization_params(k, v)
elif k == 'keyword':
self.map_tag_params(k, v)
elif k == 'distribution':
self.map_resources_params(k, v)
elif k == 'Comments':
continue
else:
self.map_package_extras(k, v)
return self.package
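# A minimal usage sketch (hypothetical input record; the keys follow those
# handled in map() above):
#
#   mapper = mapod2ckan()
#   package = mapper.map({
#       'title': u'Sample dataset',
#       'identifier': u'sample-dataset',
#       'keyword': [u'Budget'],
#   })
#   # package['name'] == u'sample-dataset'
#   # package['tag'] == [{'name': 'budget'}]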
|
ltilve/chromium
|
refs/heads/igalia-sidebar
|
tools/clang/scripts/posix-print-revision.py
|
55
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# GN only supports shelling to python. Until update.py is used on all
# platforms (currently only Windows), wrap update.sh.
sys.exit(os.system(os.path.join(os.path.dirname(__file__), 'update.sh') +
' --print-revision'))
|
ramitalat/odoo
|
refs/heads/8.0
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/tiny_socket.py
|
386
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import socket
import cPickle
import cStringIO
import marshal
class Myexception(Exception):
def __init__(self, faultCode, faultString):
self.faultCode = faultCode
self.faultString = faultString
self.args = (faultCode, faultString)
class mysocket:
def __init__(self, sock=None):
if sock is None:
self.sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
self.sock.settimeout(120)
def connect(self, host, port=False):
if not port:
protocol, buf = host.split('//')
host, port = buf.split(':')
self.sock.connect((host, int(port)))
def disconnect(self):
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
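# Wire format shared by mysend()/myreceive(): an 8-character decimal length
# header, a one-byte "0"/"1" flag marking whether the payload carries an
# exception, then the cPickle-serialised [message, traceback] pair.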
def mysend(self, msg, exception=False, traceback=None):
msg = cPickle.dumps([msg,traceback])
size = len(msg)
self.sock.send('%8d' % size)
self.sock.send(exception and "1" or "0")
totalsent = 0
while totalsent < size:
sent = self.sock.send(msg[totalsent:])
if sent == 0:
raise RuntimeError("Socket connection broken.")
totalsent = totalsent + sent
def myreceive(self):
buf=''
while len(buf) < 8:
chunk = self.sock.recv(8 - len(buf))
if chunk == '':
raise RuntimeError("Socket connection broken.")
buf += chunk
size = int(buf)
buf = self.sock.recv(1)
if buf != "0":
exception = buf
else:
exception = False
msg = ''
while len(msg) < size:
chunk = self.sock.recv(size-len(msg))
if chunk == '':
raise RuntimeError("Socket connection broken.")
msg = msg + chunk
msgio = cStringIO.StringIO(msg)
unpickler = cPickle.Unpickler(msgio)
unpickler.find_global = None
res = unpickler.load()
if isinstance(res[0],Exception):
if exception:
raise Myexception(str(res[0]), str(res[1]))
raise res[0]
else:
return res[0]
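# A minimal usage sketch (hypothetical host and port; the peer must speak the
# framing described above mysend()):
#
#   client = mysocket()
#   client.connect('localhost', 8070)
#   client.mysend(('some', 'request'))
#   reply = client.myreceive()
#   client.disconnect()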
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
decentfox/aiohttp
|
refs/heads/master
|
tests/test_web_request_handler.py
|
5
|
import pytest
from aiohttp import web
from unittest import mock
def test_repr(loop):
app = web.Application(loop=loop)
manager = app.make_handler()
handler = manager()
assert '<RequestHandler none:none disconnected>' == repr(handler)
handler.transport = object()
handler._meth = 'GET'
handler._path = '/index.html'
assert '<RequestHandler GET:/index.html connected>' == repr(handler)
def test_connections(loop):
app = web.Application(loop=loop)
manager = app.make_handler()
assert manager.connections == []
handler = object()
transport = object()
manager.connection_made(handler, transport)
assert manager.connections == [handler]
manager.connection_lost(handler, None)
assert manager.connections == []
@pytest.mark.run_loop
def test_finish_connection_no_timeout(loop):
app = web.Application(loop=loop)
manager = app.make_handler()
handler = mock.Mock()
transport = mock.Mock()
manager.connection_made(handler, transport)
yield from manager.finish_connections()
manager.connection_lost(handler, None)
assert manager.connections == []
handler.closing.assert_called_with(timeout=None)
transport.close.assert_called_with()
@pytest.mark.run_loop
def test_finish_connection_timeout(loop):
app = web.Application(loop=loop)
manager = app.make_handler()
handler = mock.Mock()
transport = mock.Mock()
manager.connection_made(handler, transport)
yield from manager.finish_connections(timeout=0.1)
manager.connection_lost(handler, None)
assert manager.connections == []
handler.closing.assert_called_with(timeout=0.09)
transport.close.assert_called_with()
|
piquadrat/django
|
refs/heads/master
|
django/core/files/storage.py
|
21
|
import os
from datetime import datetime
from urllib.parse import urljoin
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File, locks
from django.core.files.move import file_move_safe
from django.core.signals import setting_changed
from django.utils import timezone
from django.utils._os import safe_join
from django.utils.crypto import get_random_string
from django.utils.deconstruct import deconstructible
from django.utils.encoding import filepath_to_uri
from django.utils.functional import LazyObject, cached_property
from django.utils.module_loading import import_string
from django.utils.text import get_valid_filename
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage:
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""Retrieve the specified file from storage."""
return self._open(name, mode)
def save(self, name, content, max_length=None):
"""
Save new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
if not hasattr(content, 'chunks'):
content = File(content, name)
name = self.get_available_name(name, max_length=max_length)
return self._save(name, content)
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Return a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name, max_length=None):
"""
Return a filename that's free on the target storage system and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a random 7
# character alphanumeric string (before the file extension, if one
# exists) to the filename until the generated filename doesn't exist.
# Truncate original name if required, so the new filename does not
# exceed the max_length.
while self.exists(name) or (max_length and len(name) > max_length):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
if max_length is None:
continue
# Truncate file_root if max_length exceeded.
truncation = len(name) - max_length
if truncation > 0:
file_root = file_root[:-truncation]
# Entire file_root was truncated in attempt to find an available filename.
if not file_root:
raise SuspiciousFileOperation(
'Storage can not find an available filename for "%s". '
'Please make sure that the corresponding file field '
'allows sufficient "max_length".' % name
)
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
return name
def generate_filename(self, filename):
"""
Validate the filename by calling get_valid_name() and return a filename
to be passed to the save() method.
"""
# `filename` may include a path as returned by FileField.upload_to.
dirname, filename = os.path.split(filename)
return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))
def path(self, name):
"""
Return a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Delete the specified file from the storage system.
"""
raise NotImplementedError('subclasses of Storage must provide a delete() method')
def exists(self, name):
"""
Return True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError('subclasses of Storage must provide an exists() method')
def listdir(self, path):
"""
List the contents of the specified path. Return a 2-tuple of lists:
the first item being directories, the second item being files.
"""
raise NotImplementedError('subclasses of Storage must provide a listdir() method')
def size(self, name):
"""
Return the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide a size() method')
def url(self, name):
"""
Return an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError('subclasses of Storage must provide a url() method')
def get_accessed_time(self, name):
"""
Return the last accessed time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method')
def get_created_time(self, name):
"""
Return the creation time (as a datetime) of the file specified by name.
The datetime will be timezone-aware if USE_TZ=True.
"""
raise NotImplementedError('subclasses of Storage must provide a get_created_time() method')
def get_modified_time(self, name):
"""
Return the last modified time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
raise NotImplementedError('subclasses of Storage must provide a get_modified_time() method')
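# A minimal sketch of a custom backend (illustrative only, not part of this
# module; assumes `import io`): implement _open() and _save() plus whichever
# abstract methods above you need, and the public open()/save() wrappers come
# for free.
#
#   class InMemoryStorage(Storage):
#       def __init__(self):
#           self._files = {}
#       def _open(self, name, mode='rb'):
#           return File(io.BytesIO(self._files[name]), name)
#       def _save(self, name, content):
#           self._files[name] = content.read()
#           return name
#       def exists(self, name):
#           return name in self._files
#       def delete(self, name):
#           self._files.pop(name, None)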
@deconstructible
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
def __init__(self, location=None, base_url=None, file_permissions_mode=None,
directory_permissions_mode=None):
self._location = location
self._base_url = base_url
self._file_permissions_mode = file_permissions_mode
self._directory_permissions_mode = directory_permissions_mode
setting_changed.connect(self._clear_cached_properties)
def _clear_cached_properties(self, setting, **kwargs):
"""Reset setting based property values."""
if setting == 'MEDIA_ROOT':
self.__dict__.pop('base_location', None)
self.__dict__.pop('location', None)
elif setting == 'MEDIA_URL':
self.__dict__.pop('base_url', None)
elif setting == 'FILE_UPLOAD_PERMISSIONS':
self.__dict__.pop('file_permissions_mode', None)
elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS':
self.__dict__.pop('directory_permissions_mode', None)
def _value_or_setting(self, value, setting):
return setting if value is None else value
@cached_property
def base_location(self):
return self._value_or_setting(self._location, settings.MEDIA_ROOT)
@cached_property
def location(self):
return os.path.abspath(self.base_location)
@cached_property
def base_url(self):
if self._base_url is not None and not self._base_url.endswith('/'):
self._base_url += '/'
return self._value_or_setting(self._base_url, settings.MEDIA_URL)
@cached_property
def file_permissions_mode(self):
return self._value_or_setting(self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS)
@cached_property
def directory_permissions_mode(self):
return self._value_or_setting(self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS)
def _open(self, name, mode='rb'):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
try:
if self.directory_permissions_mode is not None:
# os.makedirs applies the global umask, so we reset it,
# for consistency with file_permissions_mode behavior.
old_umask = os.umask(0)
try:
os.makedirs(directory, self.directory_permissions_mode)
finally:
os.umask(old_umask)
else:
os.makedirs(directory)
except FileNotFoundError:
# There's a race between os.path.exists() and os.makedirs().
# If os.makedirs() fails with FileNotFoundError, the directory
# was created concurrently.
pass
if not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
getattr(os, 'O_BINARY', 0))
# The current umask value is masked out by os.open!
fd = os.open(full_path, flags, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
mode = 'wb' if isinstance(chunk, bytes) else 'wt'
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except FileExistsError:
# A new name is needed if the file exists.
name = self.get_available_name(name)
full_path = self.path(name)
else:
# OK, the file save worked. Break out of the loop.
break
if self.file_permissions_mode is not None:
os.chmod(full_path, self.file_permissions_mode)
# Store filenames with forward slashes, even on Windows.
return name.replace('\\', '/')
def delete(self, name):
assert name, "The name argument is not allowed to be empty."
name = self.path(name)
# If the file or directory exists, delete it from the filesystem.
try:
if os.path.isdir(name):
os.rmdir(name)
else:
os.remove(name)
except FileNotFoundError:
# FileNotFoundError is raised if the file or directory was removed
# concurrently.
pass
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
directories.append(entry)
else:
files.append(entry)
return directories, files
def path(self, name):
return safe_join(self.location, name)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
url = filepath_to_uri(name)
if url is not None:
url = url.lstrip('/')
return urljoin(self.base_url, url)
def _datetime_from_timestamp(self, ts):
"""
If timezone support is enabled, make an aware datetime object in UTC;
otherwise make a naive one in the local timezone.
"""
if settings.USE_TZ:
# Safe to use .replace() because UTC doesn't have DST
return datetime.utcfromtimestamp(ts).replace(tzinfo=timezone.utc)
else:
return datetime.fromtimestamp(ts)
def get_accessed_time(self, name):
return self._datetime_from_timestamp(os.path.getatime(self.path(name)))
def get_created_time(self, name):
return self._datetime_from_timestamp(os.path.getctime(self.path(name)))
def get_modified_time(self, name):
return self._datetime_from_timestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
return import_string(import_path or settings.DEFAULT_FILE_STORAGE)
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
|
pescobar/easybuild-framework
|
refs/heads/master
|
test/framework/sandbox/easybuild/easyblocks/f/foofoo.py
|
2
|
##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing foofoo, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.easyblocks.foo import EB_foo
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
class EB_foofoo(EB_foo):
"""Support for building/installing foofoo."""
@staticmethod
def extra_options():
"""Custom easyconfig parameters for foofoo."""
extra_vars = {
'foofoo_extra1': [None, "first foofoo-specific easyconfig parameter (mandatory)", MANDATORY],
'foofoo_extra2': ['FOOFOO', "second foofoo-specific easyconfig parameter", CUSTOM],
}
return EB_foo.extra_options(extra_vars)
|
lach76/scancode-toolkit
|
refs/heads/develop
|
tests/commoncode/test_functional.py
|
6
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from unittest.case import TestCase
from commoncode.functional import flatten
class TestFunctional(TestCase):
def test_flatten(self):
expected = [7, 6, 5, 4, 'a', 3, 3, 2, 1]
test = flatten([7, (6, [5, [4, ["a"], 3]], 3), 2, 1])
assert expected == test
def test_flatten_generator(self):
def gen():
for _ in range(2):
yield range(5)
expected = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
test = flatten(gen())
assert expected == test
def test_flatten_empties(self):
expected = ['a']
test = flatten([[], (), ['a']])
assert expected == test
|