Dataset schema (each record below follows this column order):

  code        string   lengths 3 – 1.05M
  repo_name   string   lengths 5 – 104
  path        string   lengths 4 – 251
  language    string   1 class
  license     string   15 classes
  size        int64    3 – 1.05M
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2015-2018:
#   Matthieu Estrada, ttamalfor@gmail.com
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.

"""
    Frames
    ++++++
    Frames manage global QFrames for Alignak-app
"""

from logging import getLogger

from PyQt5.Qt import QFrame

logger = getLogger(__name__)


def get_frame_separator(vertical=False):
    """
    Return a frame separator

    :param vertical: define if separator is vertical or horizontal
    :type vertical: bool
    :return: frame separator
    :rtype: QFrame
    """

    line = QFrame()
    if vertical:
        line.setObjectName('vseparator')
        line.setFrameShape(QFrame.VLine)
    else:
        line.setObjectName('hseparator')
        line.setFrameShape(QFrame.HLine)

    return line
repo_name: Alignak-monitoring-contrib/alignak-app
path: alignak_app/qobjects/common/frames.py
language: Python
license: agpl-3.0
size: 1,441
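The get_frame_separator helper above returns a styled QFrame line. A minimal usage sketch, assuming PyQt5 and alignak_app are importable and a display is available (the labels are illustrative):

from PyQt5.QtWidgets import QApplication, QLabel, QVBoxLayout, QWidget
from alignak_app.qobjects.common.frames import get_frame_separator

app = QApplication([])
widget = QWidget()
layout = QVBoxLayout(widget)
layout.addWidget(QLabel('Hosts'))
layout.addWidget(get_frame_separator())  # horizontal 'hseparator' line
layout.addWidget(QLabel('Services'))
widget.show()
app.exec_()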
import sys
import serial
from time import sleep


def test_feel(arg_1, arg_2, arg_3, arg_4):
    serial_port = str(arg_1)
    baud_rate = int(arg_2)
    time_out = int(arg_3)
    parameter = int(arg_4)

    device = serial.Serial(serial_port, baud_rate, timeout=time_out)
    # NOTE: the string commands below assume Python 2 pyserial; on
    # Python 3, write() expects bytes (e.g. b'1/1/11/0/').
    for i in range(0, 10000):
        device.write('1/1/11/0/')
        device.write('1/2/22/1/')
        device.write('1/2/24/2/')
        device.write('2/2/23/')
        x = int(device.readline().strip())
        print(x)
        if x == 1:
            device.write('1/2/9/2/')
        else:
            device.write('1/2/9/1/')

    device.write('1/1/9/0/')
    device.write('1/2/22/1/')
    device.write('1/1/24/1/')
    device.close()


if __name__ == "__main__":
    test_feel(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
repo_name: DeRaafMedia/ProjectIRCInteractivity
path: skills/test_feel_function.py
language: Python
license: artistic-2.0
size: 816
#!/usr/bin/env python
# coding=utf-8

import hashlib
import requests
import time
from .binance_exceptions import BinanceAPIException
from .binance_validation import validate_order
from urllib import urlencode


class Client(object):
    """
    https://www.binance.com/cn/fee/schedule (fee schedule)
    """

    API_URL = 'https://www.binance.com/api'
    WEBSITE_URL = 'https://www.binance.com'
    API_VERSION = 'v1'
    _products = None

    def __init__(self, api_key, api_secret):
        self.API_KEY = api_key
        self.API_SECRET = api_secret
        self.session = self._init_session()

        # init DNS and SSL cert
        self.ping()
        self.get_products()

    def _init_session(self):
        session = requests.session()
        session.headers.update({'Accept': 'application/json',
                                'User-Agent': 'binance/python',
                                'X-MBX-APIKEY': self.API_KEY})
        return session

    def _create_api_uri(self, path):
        return self.API_URL + '/' + self.API_VERSION + '/' + path

    def _create_website_uri(self, path):
        return self.WEBSITE_URL + '/' + path

    def _generate_signature(self, data):
        query_string = urlencode(data)
        m = hashlib.sha256()
        m.update((self.API_SECRET + '|' + query_string).encode())
        return m.hexdigest()

    def _request(self, method, path, signed, **kwargs):
        uri = self._create_api_uri(path)

        data = kwargs.get('data', None)
        if data and isinstance(data, dict):
            kwargs['data'] = data

        if signed:
            # generate signature
            kwargs['data']['timestamp'] = int(time.time() * 1000)
            kwargs['data']['signature'] = self._generate_signature(kwargs['data'])

        if data and method == 'get':
            kwargs['params'] = kwargs['data']
            del (kwargs['data'])

        response = getattr(self.session, method)(uri, **kwargs)

        return self._handle_response(response)

    def _request_website(self, method, path, **kwargs):
        uri = self._create_website_uri(path)

        data = kwargs.get('data', None)
        if data and isinstance(data, dict):
            kwargs['data'] = data

        if data and method == 'get':
            kwargs['params'] = kwargs['data']
            del (kwargs['data'])

        response = getattr(self.session, method)(uri, **kwargs)

        return self._handle_response(response)

    def _handle_response(self, response):
        """Internal helper for handling API responses from the Binance server.
        Raises the appropriate exceptions when necessary; otherwise, returns
        the response.
        """
        if not str(response.status_code).startswith('2'):
            raise BinanceAPIException(response)
        return response.json()

    def _get(self, path, signed=False, **kwargs):
        return self._request('get', path, signed, **kwargs)

    def _post(self, path, signed=False, **kwargs):
        return self._request('post', path, signed, **kwargs)

    def _put(self, path, signed=False, **kwargs):
        return self._request('put', path, signed, **kwargs)

    def _delete(self, path, signed=False, **kwargs):
        return self._request('delete', path, signed, **kwargs)

    def _parse_products(self, products):
        """
        Parse the response from get_products to use for validation

        :param products:
        :return:
        """
        self._products = {}
        if 'data' in products:
            products = products['data']
        for p in products:
            self._products[p['symbol']] = p

    # Website Endpoints

    def get_products(self):
        """
        Return list of products currently listed on Binance

        :return:
        """
        products = self._request_website('get', 'exchange/public/product')
        self._parse_products(products)
        return products

    # General Endpoints

    def ping(self):
        """
        Test connectivity to the Rest API.

        https://www.binance.com/restapipub.html#test-connectivity

        :return:
        """
        return self._get('ping')

    def get_server_time(self):
        """
        Test connectivity to the Rest API and get the current server time.

        https://www.binance.com/restapipub.html#check-server-time

        :return:
        """
        return self._get('time')

    # Market Data Endpoints

    def get_all_tickers(self):
        """
        Get last price for all markets

        :return:
        """
        return self._get('ticker/allPrices')

    def get_orderbook_tickers(self):
        """
        Get first bid and ask entry in the order book for all markets

        :return:
        """
        return self._get('ticker/allBookTickers')

    def get_order_book(self, **params):
        """
        Get the Order Book for the market

        https://www.binance.com/restapipub.html#order-book

        :param params:
            symbol - required
            limit - Default 100; max 100

        :return:
        """
        return self._get('depth', data=params)

    def get_aggregate_trades(self, **params):
        """
        Get compressed, aggregate trades. Trades that fill at the same time,
        from the same order, with the same price will have the quantity
        aggregated.

        https://www.binance.com/restapipub.html#compressedaggregate-trades-list

        :param params:
            symbol - required
            fromId - ID to get aggregate trades from INCLUSIVE.
            startTime - Timestamp in ms to get aggregate trades from INCLUSIVE.
            endTime - Timestamp in ms to get aggregate trades until INCLUSIVE.
            limit - Default 500; max 500.

        :return:
        """
        return self._get('aggTrades', data=params)

    def get_klines(self, **params):
        """
        Kline/candlestick bars for a symbol. Klines are uniquely identified by
        their open time.

        https://www.binance.com/restapipub.html#klinecandlesticks

        :param params:
            symbol - required
            interval - enum
            limit - Default 500; max 500.
            startTime -
            endTime -

        :return:
        """
        return self._get('klines', data=params)

    def get_ticker(self, **params):
        """
        24 hour price change statistics.

        https://www.binance.com/restapipub.html#24hr-ticker-price-change-statistics

        :param params:
            symbol - required

        :return:
        """
        return self._get('ticker/24hr', data=params)

    # Account Endpoints

    def create_order(self, disable_validation=False, **params):
        """
        Send in a new order

        https://www.binance.com/restapipub.html#new-order--signed

        :param disable_validation: disable client side order validation
        :param params:
            symbol - required
            side - required
            type - required
            timeInForce - required if limit order
            quantity - required
            price - required if limit order
            newClientOrderId - A unique id for the order. Automatically generated if not sent.
            stopPrice - Used with stop orders
            icebergQty - Used with iceberg orders

        :return:
        """
        if not disable_validation:
            validate_order(params, self._products)
        return self._post('order', True, data=params)

    def create_test_order(self, disable_validation=False, **params):
        """
        Test new order creation and signature/recvWindow long. Creates and
        validates a new order but does not send it into the matching engine.

        https://www.binance.com/restapipub.html#test-new-order-signed

        :param disable_validation: disable client side order validation
        :param params:
            symbol - required
            side - required enum
            type - required enum
            timeInForce - required if limit order
            quantity - required
            price - required if limit order
            newClientOrderId - A unique id for the order. Automatically generated if not sent.
            stopPrice - Used with stop orders
            icebergQty - Used with iceberg orders
            recvWindow - the number of milliseconds the request is valid for

        :return:
        """
        if not disable_validation:
            validate_order(params, self._products)
        return self._post('order/test', True, data=params)

    def get_order(self, **params):
        """
        Check an order's status. Either orderId or origClientOrderId must be sent.

        https://www.binance.com/restapipub.html#query-order-signed

        :param params:
            symbol - required
            orderId - The unique order id
            origClientOrderId - The unique order id
            recvWindow - the number of milliseconds the request is valid for

        :return:
        """
        return self._get('order', True, data=params)

    def get_all_orders(self, **params):
        """
        Get all account orders; active, canceled, or filled.

        https://www.binance.com/restapipub.html#all-orders-signed

        :param params:
            symbol - required
            orderId - The unique order id
            limit - Default 500; max 500.
            recvWindow - the number of milliseconds the request is valid for

        :return:
        """
        return self._get('allOrders', True, data=params)

    def cancel_order(self, **params):
        """
        Cancel an active order.

        https://www.binance.com/restapipub.html#all-orders-signed

        :param params:
            symbol - required
            orderId - If orderId is set, it will get orders >= that orderId.
                      Otherwise most recent orders are returned.
            origClientOrderId - The unique order id
            newClientOrderId - The unique order id
            recvWindow - the number of milliseconds the request is valid for

        :return:
        """
        return self._delete('order', True, data=params)

    def get_open_orders(self, **params):
        """
        Get all open orders on a symbol.

        https://www.binance.com/restapipub.html#current-open-orders-signed

        :param params:
            symbol - required
            recvWindow - the number of milliseconds the request is valid for

        :return:
        """
        return self._get('openOrders', True, data=params)

    # User Stream Endpoints

    def get_account(self, **params):
        """
        Get current account information.

        https://www.binance.com/restapipub.html#account-information-signed

        :param params:
            recvWindow - the number of milliseconds the request is valid for

        :return:
        """
        return self._get('account', True, data=params)

    def get_my_trades(self, **params):
        """
        Get trades for a specific account and symbol.

        https://www.binance.com/restapipub.html#account-trade-list-signed

        :param params:
            symbol - required
            limit - Default 500; max 500.
            fromId - TradeId to fetch from. Default gets most recent trades.
            recvWindow - the number of milliseconds the request is valid for

        :return:
        """
        return self._get('myTrades', True, data=params)

    # User Stream Endpoints

    def stream_get_listen_key(self):
        """
        Start a new user data stream and return the listen key

        https://www.binance.com/restapipub.html#start-user-data-stream-api-key

        :return:
        """
        res = self._post('userDataStream', False, data={})
        return res['listenKey']

    def stream_keepalive(self, **params):
        """
        PING a user data stream to prevent a time out.

        https://www.binance.com/restapipub.html#keepalive-user-data-stream-api-key

        :return:
        """
        return self._put('userDataStream', False, data=params)

    def stream_close(self, **params):
        """
        Close out a user data stream.

        https://www.binance.com/restapipub.html#close-user-data-stream-api-key

        :return:
        """
        return self._delete('userDataStream', False, data=params)
repo_name: doubleDragon/QuantBot
path: quant/api/binance.py
language: Python
license: mit
size: 12,224
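The signing scheme in _generate_signature above is plain SHA-256 over "<secret>|<urlencoded payload>" (note this is what this particular client does; it is not necessarily the current Binance API's signing scheme). A standalone sketch of the same computation with illustrative values:

import hashlib
try:
    from urllib import urlencode          # Python 2, as in the module above
except ImportError:
    from urllib.parse import urlencode    # Python 3 fallback

api_secret = 'my-api-secret'              # hypothetical credential
payload = {'symbol': 'BNBBTC', 'timestamp': 1507725176595}
signature = hashlib.sha256(
    (api_secret + '|' + urlencode(payload)).encode()).hexdigest()
print(signature)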
from math import sqrt

import gtk
from gettext import gettext as _

from ase.gui.widgets import pack, Help


class Constraints(gtk.Window):
    def __init__(self, gui):
        gtk.Window.__init__(self)
        self.set_title(_('Constraints'))
        vbox = gtk.VBox()
        b = pack(vbox, [gtk.Button(_('Constrain')),
                        gtk.Label(_(' selected atoms'))])[0]
        b.connect('clicked', self.selected)
        b = pack(vbox, [gtk.Button(_('Constrain')),
                        gtk.Label(_(' immobile atoms:'))])[0]
        b.connect('clicked', self.immobile)
        b = pack(vbox, [gtk.Button(_('Unconstrain')),
                        gtk.Label(_(' selected atoms:'))])[0]
        b.connect('clicked', self.unconstrain)
        b = pack(vbox, gtk.Button(_('Clear constraints')))
        b.connect('clicked', self.clear)
        close = pack(vbox, gtk.Button(_('Close')))
        close.connect('clicked', lambda widget: self.destroy())
        self.add(vbox)
        vbox.show()
        self.show()
        self.gui = gui

    def selected(self, button):
        self.gui.images.dynamic[self.gui.images.selected] = False
        self.gui.draw()

    def unconstrain(self, button):
        self.gui.images.dynamic[self.gui.images.selected] = True
        self.gui.draw()

    def immobile(self, button):
        self.gui.images.set_dynamic()
        self.gui.draw()

    def clear(self, button):
        self.gui.images.dynamic[:] = True
        self.gui.draw()
repo_name: grhawk/ASE
path: tools/ase/gui/constraints.py
language: Python
license: gpl-2.0
size: 1,497
""" Tests for Serializer Fields """ from django.core.exceptions import ImproperlyConfigured from django.test import TestCase import pytest from rest_framework.serializers import ValidationError from courses.factories import EdxAuthorFactory, CourseFactory from courses.models import EdxAuthor from courses.serializers import JsonListField as JLF from courses.serializers import StringyManyToManyField as SMMF class JsonListFieldTests(TestCase): """ Tests for JsonListField """ def test_decodes_string(self): """ Test that empty list string decodes properly """ f = JLF() self.assertEqual([], f.to_internal_value('[]')) def test_decodes_unicode(self): """ Test that empty list unicode string decodes properly """ f = JLF() self.assertEqual([], f.to_internal_value(u'[]')) def test_handles_decoding_nullable_values(self): """ Test that null is decoded to None """ f = JLF() self.assertEqual(None, f.to_internal_value('null')) def test_throws_validationerror_on_invalid_json(self): """ Test invalid JSON """ f = JLF() self.assertRaises(ValidationError, f.to_internal_value, 'testing') def test_not_list(self): """ Test that to_internal_value takes only lists """ f = JLF() self.assertRaises(ValidationError, f.to_internal_value, '{}') class StringyM2MTestCase(TestCase): """Tests for m2m stringy field serializer""" def test_requires_model(self): """Field requires a model kwarg""" self.assertRaises(ImproperlyConfigured, SMMF, lookup='test') def test_requires_lookup(self): """Field requires a lookup kwarg""" self.assertRaises(ImproperlyConfigured, SMMF, model=EdxAuthor) def test_returns_string_for_all_objects(self): # pylint: disable=no-self-use """model-to-string returns correct strings""" e1 = EdxAuthorFactory.create() e2 = EdxAuthorFactory.create() co = CourseFactory.create() co.instructors.add(e1) co.instructors.add(e2) f = SMMF(model=EdxAuthor, lookup='edx_uid') assert sorted([str(e1), str(e2)]) == sorted(f.to_representation(co.instructors)) def test_returns_model_if_string_provided(self): # pylint: disable=no-self-use """string-to-model returns correct model for single string""" uid = '2d133482b3214a119f55c3060d882ceb' CourseFactory.create() f = SMMF(model=EdxAuthor, lookup='edx_uid') ms = f.to_internal_value(uid) assert len(ms) == 1 assert ms[0].edx_uid == uid def test_returns_models_if_list_provided(self): # pylint: disable=no-self-use """string-to-model returns correct model for list""" uid = '2d133482b3214a119f55c3060d882ceb' uid2 = '3d133482b3214a119f55c3060d882ceb' CourseFactory.create() f = SMMF(model=EdxAuthor, lookup='edx_uid') ms = f.to_internal_value([uid, uid2]) assert len(ms) == 2 assert ms[0].edx_uid != ms[1].edx_uid assert ms[0].edx_uid in [uid, uid2] assert ms[1].edx_uid in [uid, uid2] def test_errors_on_invalid_input(self): # pylint: disable=no-self-use """Only deserialize known, supported types.""" CourseFactory.create() f = SMMF(model=EdxAuthor, lookup='edx_uid') with pytest.raises(ValidationError): f.to_internal_value(dict())
repo_name: mitodl/ccxcon
path: courses/fields_test.py
language: Python
license: agpl-3.0
size: 3,535
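For orientation, the contract the JsonListField tests above pin down ('[]' decodes to [], 'null' to None, and anything that is not a JSON list raises ValidationError) could be satisfied by a field like the sketch below; this is an illustration, not the project's actual implementation:

import json
from rest_framework import serializers

class JsonListFieldSketch(serializers.Field):
    def to_internal_value(self, data):
        try:
            value = json.loads(data)
        except (TypeError, ValueError):
            # 'testing' and other non-JSON inputs land here
            raise serializers.ValidationError('Invalid JSON')
        if value is not None and not isinstance(value, list):
            # '{}' decodes fine but is rejected: only lists or null pass
            raise serializers.ValidationError('Expected a JSON list or null')
        return value

    def to_representation(self, value):
        return json.dumps(value)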
from pyrser import dsl
from pyrser import parsing
from pyrser import meta
from pyrser import error
from collections import ChainMap


class MetaGrammar(parsing.MetaBasicParser):
    """Metaclass for all grammars."""
    def __new__(metacls, name, bases, namespace):
        # for multiple inheritance we have a simple inheritance relation
        # from the first class in declaration order.
        metabp = parsing.MetaBasicParser
        if len(bases) <= 1:
            cls = metabp.__new__(metacls, name, bases, namespace)
        else:
            b = tuple([bases[0]])
            cls = metabp.__new__(metacls, name, b, namespace)
        # lookup for the metaclass of parsing.
        # Grammar magically inherits rules&hooks from Parser
        if 'Parser' in parsing.base._MetaBasicParser:
            clsbase = parsing.base._MetaBasicParser['Parser']
            # link rules&hooks
            cls._rules = clsbase._rules.new_child()
            cls._hooks = clsbase._hooks.new_child()
        # add rules from DSL
        if 'grammar' in namespace and namespace['grammar'] is not None:
            sname = None
            if 'source' in namespace and namespace['source'] is not None:
                sname = namespace['source']
            rules = cls.dsl_parser(namespace['grammar'], sname).get_rules()
            if not rules:
                return rules
            # namespace rules with module/class name
            for rule_name, rule_pt in rules.items():
                if '.' not in rule_name:
                    rule_name = cls.__module__ \
                        + '.' + cls.__name__ \
                        + '.' + rule_name
                meta.set_one(cls._rules, rule_name, rule_pt)
        # add locally defined rules (and thus overloads)
        if '_rules' in namespace and namespace['_rules'] is not None:
            cls._rules.update(namespace['_rules'])
        # add locally defined hooks
        if '_hooks' in namespace and namespace['_hooks'] is not None:
            cls._hooks.update(namespace['_hooks'])
        # Manage Aggregation
        if len(bases) > 1:
            aggreg_rules = ChainMap()
            aggreg_hooks = ChainMap()
            for subgrammar in bases:
                if hasattr(subgrammar, '_rules'):
                    aggreg_rules = ChainMap(*(aggreg_rules.maps
                                              + subgrammar._rules.maps))
                if hasattr(subgrammar, '_hooks'):
                    aggreg_hooks = ChainMap(*(aggreg_hooks.maps
                                              + subgrammar._hooks.maps))
            # aggregate at toplevel the branch grammar
            cls._rules = ChainMap(*(cls._rules.maps + aggreg_rules.maps))
            cls._hooks = ChainMap(*(cls._hooks.maps + aggreg_hooks.maps))
        # clean redundant maps in the chain for rules
        orderedunique_rules = []
        tocpy_rules = set([id(_) for _ in cls._rules.maps])
        for ch in cls._rules.maps:
            idch = id(ch)
            if idch in tocpy_rules:
                orderedunique_rules.append(ch)
                tocpy_rules.remove(idch)
        cls._rules = ChainMap(*orderedunique_rules)
        # clean redundant maps in the chain for hooks
        orderedunique_hooks = []
        tocpy_hooks = set([id(_) for _ in cls._hooks.maps])
        for ch in cls._hooks.maps:
            idch = id(ch)
            if idch in tocpy_hooks:
                orderedunique_hooks.append(ch)
                tocpy_hooks.remove(idch)
        cls._hooks = ChainMap(*orderedunique_hooks)
        return cls


class Grammar(parsing.Parser, metaclass=MetaGrammar):
    """
    Base class for all grammars.

    This class turns any class A that inherits from it into a grammar.
    Given the description of the grammar as a parameter, it adds
    everything A needs to parse it.
    """
    # Text grammar to generate parsing rules for this class.
    grammar = None
    # Name of the default rule to parse the grammar.
    entry = None
    # DSL parsing class
    dsl_parser = dsl.EBNF

    def after_parse(self, node: parsing.Node) -> parsing.Node:
        """
        If you want to do some stuff after parsing, overload this...
        """
        return node

    def _do_parse(self, entry: str) -> parsing.Node:
        res = None
        self.diagnostic = error.Diagnostic()
        try:
            res = self.eval_rule(entry)
        except error.Diagnostic as d:
            # User put an error rule
            d.notify(
                error.Severity.ERROR,
                "Exception during the evaluation of '%s'" % self._lastRule,
                error.LocationInfo.from_stream(self._stream,
                                               is_error=self.from_string)
            )
            self.diagnostic = d
        if not res:
            # we fail to parse, but error is not set on the last rule
            self.diagnostic.notify(
                error.Severity.ERROR,
                "Parse error in '%s'" % self._lastRule,
                error.LocationInfo.from_maxstream(self._stream,
                                                  is_error=self.from_string)
            )
            if self.raise_diagnostic:
                raise self.diagnostic
            else:
                return self
        # clear contexted variables
        self.rule_nodes.clear()
        # create a new Diagnostic object for the node result
        res.diagnostic = error.Diagnostic()
        # all is ok
        return self.after_parse(res)

    def parse(self, source: str=None, entry: str=None) -> parsing.Node:
        """Parse source using the grammar"""
        self.from_string = True
        if source is not None:
            self.parsed_stream(source)
        if entry is None:
            entry = self.entry
        if entry is None:
            raise ValueError("No entry rule name defined for {}".format(
                self.__class__.__name__))
        return self._do_parse(entry)

    def parse_file(self, filename: str, entry: str=None) -> parsing.Node:
        """Parse filename using the grammar"""
        self.from_string = False
        import os.path
        if os.path.exists(filename):
            f = open(filename, 'r')
            self.parsed_stream(f.read(), os.path.abspath(filename))
            f.close()
        if entry is None:
            entry = self.entry
        if entry is None:
            raise ValueError("No entry rule name defined for {}".format(
                self.__class__.__name__))
        return self._do_parse(entry)


generated_class = 0


def build_grammar(inherit: tuple, scope: dict) -> Grammar:
    global generated_class
    class_name = "gen_class_" + str(generated_class)
    generated_class += 1
    return type(class_name, inherit, scope)


def from_string(bnf: str, entry=None, *optional_inherit) -> Grammar:
    """
    Create a Grammar from a string
    """
    inherit = [Grammar] + list(optional_inherit)
    scope = {'grammar': bnf, 'entry': entry}
    return build_grammar(tuple(inherit), scope)


def from_file(fn: str, entry=None, *optional_inherit) -> Grammar:
    """
    Create a Grammar from a file
    """
    import os.path
    if os.path.exists(fn):
        f = open(fn, 'r')
        bnf = f.read()
        f.close()
        inherit = [Grammar] + list(optional_inherit)
        scope = {'grammar': bnf, 'entry': entry, 'source': fn}
        return build_grammar(tuple(inherit), scope)
    raise Exception("File not Found!")
repo_name: payet-s/pyrser
path: pyrser/grammar.py
language: Python
license: gpl-3.0
size: 7,534
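The de-duplication passes in MetaGrammar.__new__ above keep only the first occurrence of each underlying dict in a ChainMap, comparing by object identity. A standalone illustration of that idiom using only the standard library:

from collections import ChainMap

a, b = {'rule1': 1}, {'rule2': 2}
chained = ChainMap(a, b, a)        # the map 'a' appears twice in the chain
seen, unique = set(), []
for m in chained.maps:
    if id(m) not in seen:          # de-duplicate by object identity, keep order
        seen.add(id(m))
        unique.append(m)
chained = ChainMap(*unique)
print(len(chained.maps))           # 2 -- the duplicate of 'a' is gone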
import uuid
from random import randint

from django.shortcuts import render
from django.http import HttpResponseRedirect

from .models import Url


def index(request):
    if "has_url" in request.session:
        url = request.session.get("has_url")
        del request.session['has_url']
        return render(request, "miudo/index.html", locals())
    return render(request, "miudo/index.html", {})


def make_url(request):
    if request.method == "POST":
        url = None  # initial url
        url_site = request.POST['url']
        url_id = generate_key()
        try:
            url = Url.objects.get(url_id=url_id)
            while url:
                url_id = generate_key()
                url = Url.objects.get(url_id=url_id)
            create_url(request, url_id, url_site)
            request.session["has_url"] = url_id
        except Url.DoesNotExist:
            create_url(request, url_id, url_site)
            request.session["has_url"] = url_id
    return HttpResponseRedirect("/")


def create_url(custom_request, url_id, url_site):
    if custom_request.user.is_authenticated():
        url = Url.objects.create(url_id=url_id, url_site=url_site,
                                 url_author=custom_request.user)
    else:
        url = Url.objects.create(url_id=url_id, url_site=url_site)
    url.save()


def generate_key():
    to_choose = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    url_id = ""
    while len(url_id) != 6:
        i = randint(0, len(to_choose) - 1)
        url_id += to_choose[i]
    return url_id


def redirect_url(request, url_id=None):
    try:
        url = Url.objects.get(url_id=url_id)
        url.url_clicked = url.url_clicked + 1
        url.save()
    except Url.DoesNotExist:
        return render(request, "base/page_not_found.html", {})
    return HttpResponseRedirect(url.url_site)
repo_name: luisalves05/shortener-url
path: src/apps/miudo/views.py
language: Python
license: mit
size: 1,890
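generate_key above draws six characters from digits plus lower- and upper-case letters. For reference, an equivalent and more compact standard-library form (illustrative, not part of the project):

import random
import string

def generate_key(length=6):
    alphabet = string.digits + string.ascii_letters   # same 62 characters as to_choose
    return ''.join(random.choice(alphabet) for _ in range(length))

print(generate_key())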
#!/usr/bin/env python """Tests for the memory handler functions.""" import StringIO # pylint: disable=unused-import,g-bad-import-order from grr.client import client_plugins # pylint: enable=unused-import,g-bad-import-order from grr.client.vfs_handlers import memory from grr.lib import flags from grr.lib import rdfvalue from grr.lib import test_lib class FakeFile(object): """A Fake file object.""" offset = 0 # Implementing the file interface. # pylint: disable=g-bad-name def read(self, length): return "X" * length def seek(self, offset): self.offset = offset def tell(self): return self.offset class MockOSXMemory(memory.OSXMemory): def __init__(self): page_size = memory.OSXMemory.page_size self.runs = [(0 * page_size, 1 * page_size), (1 * page_size, 1 * page_size), (2 * page_size, 8 * page_size), (10 * page_size, 1 * page_size), (15 * page_size, 1 * page_size), (16 * page_size, 1 * page_size)] self.fd = FakeFile() class OSXMemoryTest(test_lib.GRRBaseTest): def GetVFSHandler(self): return MockOSXMemory() def testPartialRead(self): handler = self.GetVFSHandler() self.page_size = handler.page_size last_region = handler.runs[-1] self.assertEqual(handler.size, last_region[0] + last_region[1]) for start, length, valid in [ # Just read a bit. (0, 100, 100), # Read over page boundary. (self.page_size - 100, 200, 200), # Read to page boundary. (self.page_size - 100, 100, 100), # Read from page boundary. (self.page_size, 100, 100), # Read into invalid region (unmapped region starts at 11 * page_size). (11 * self.page_size - 500, 1100, 500), # Read out of invalid region (unmapped region ends at 15 * page_size). (15 * self.page_size - 500, 1100, 600), # Read inside the invalid region. (12 * self.page_size, 1000, 0), # Read over the invalid region. (0, 17 * self.page_size, 13 * self.page_size), ]: handler.Seek(start) data = handler.Read(length) self.assertEqual(len(data), length) self.assertEqual(list(data).count("X"), valid) # Check that reading the unmapped region zero pads: handler.Seek(11 * self.page_size - 5) data = handler.Read(100) # Should return X for the valid region and 0 for the invalid region. self.assertEqual(data, "X" * 5 + "\x00" * 95) class TestLinuxMemory(OSXMemoryTest): """Test the linux memory handler.""" IOMEM = """ 00000000-00001000 : System RAM 00001000-00002000 : System RAM 00002000-0000a000 : System RAM 0000a000-0000b000 : System RAM 0000f000-00010000 : System RAM 00010000-00011000 : System RAM 00096400-0009ffff : reserved 000a0000-000bffff : PCI Bus 0000:00 000c0000-000dffff : PCI Bus 0000:40 000c0000-000dffff : PCI Bus 0000:00 000c0000-000ce7ff : Video ROM 000ce800-000cf7ff : Adapter ROM 000cf800-000cfbff : Adapter ROM 000d0000-000d85ff : Adapter ROM 000e0000-000fffff : reserved 000f0000-000fffff : System ROM 00100000-cb3e1fff : System RAM 01000000-015e2222 : Kernel code 015e2223-01a9537f : Kernel data 01b81000-01c82fff : Kernel bss cb3e2000-cb7f3fff : reserved cb7f4000-cb9c4fff : ACPI Non-volatile Storage cb9c5000-cbae6fff : reserved cbae7000-cbffffff : ACPI Non-volatile Storage ff000000-ffffffff : pnp 00:0c 100000000-82fffffff : System RAM 3c0000000000-3c007fffffff : PCI Bus 0000:00 """ def GetVFSHandler(self): def FakeOpen(filename, mode="r"): """The linux driver just opens the device.""" # It uses /proc/iomem to find the protected areas. 
if filename == "/proc/iomem": return StringIO.StringIO(self.IOMEM) self.assertEqual(filename, "/dev/pmem") self.assertEqual(mode, "rb") return FakeFile() with test_lib.Stubber(memory, "open", FakeOpen): result = memory.LinuxMemory(None, pathspec=rdfvalue.PathSpec( path="/dev/pmem", pathtype=rdfvalue.PathSpec.PathType.MEMORY)) self.assertEqual(result.size, 0x82fffffff) return result class Win32FileMock(object): GENERIC_READ = GENERIC_WRITE = FILE_SHARE_READ = FILE_SHARE_WRITE = 0 OPEN_EXISTING = FILE_ATTRIBUTE_NORMAL = 0 def CreateFile(self, path, *_): self.path = path return path def ReadFile(self, fd, length): assert self.path == fd return True, "X" * length def SetFilePointer(self, fd, offset, whence=0): assert fd == self.path self.offset = offset _ = whence def DeviceIoControl(self, fd, *_): assert fd == self.path return ("0070180000000000" # CR3 "b11d000000000000" # NtBuildNumber "00a0800200f8ffff" # KernBase "a0909f0200f8ffff" # KDBG "00ad9f0200f8ffff" + # KPCR "0000000000000000" * 31 + "7872ab0200f8ffff" # PfnDataBase "d0d6a40200f8ffff" # PsLoadedModuleList "d0f3a20200f8ffff" + # PsActiveProcessHead "0000000000000000" * 0xFF + # Padding "0700000000000000" # NumberOfRuns "0000000000000000" "0010000000000000" "0010000000000000" "0010000000000000" "0020000000000000" "0080000000000000" "00a0000000000000" "0010000000000000" "00f0000000000000" "0010000000000000" "0000010000000000" "0010000000000000" "0000000001000000" "FFFFFF2F07000000").decode("hex") class TestWindowsMemory(OSXMemoryTest): """Test the windows memory handler.""" def GetVFSHandler(self): result = memory.WindowsMemory(None, pathspec=rdfvalue.PathSpec( path=r"\\.\pmem", pathtype=rdfvalue.PathSpec.PathType.MEMORY)) self.assertEqual(result.size, 0x82fffffff) return result def testPartialRead(self): with test_lib.Stubber(memory, "win32file", Win32FileMock()): super(TestWindowsMemory, self).testPartialRead() def main(argv): test_lib.main(argv) if __name__ == "__main__": flags.StartMain(main)
repo_name: spnow/grr
path: client/vfs_handlers/memory_test.py
language: Python
license: apache-2.0
size: 6,127
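FakeOpen above serves a canned /proc/iomem so the Linux handler can discover its readable runs. A small standalone sketch of that kind of parsing — extracting the "System RAM" ranges as integer start/end pairs — with an illustrative snippet of iomem text (this is not GRR's actual parser):

import re

iomem = """00000000-00001000 : System RAM
000a0000-000bffff : PCI Bus 0000:00
00100000-cb3e1fff : System RAM"""

runs = [(int(start, 16), int(end, 16))
        for start, end in re.findall(r"([0-9a-f]+)-([0-9a-f]+) : System RAM",
                                     iomem)]
print(runs)  # start/end pairs of the RAM regions, as integers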
'''
Use this script from terminal / console with
./python sisostudy.py --file_storage=my_runs

Will create an output with all the necessary information
'''

# Import the packages
# Numpy for numerical methods
import numpy as np
# Python Control for SISO creation etc.
import control as cn
# Pandas for data storage
import pandas as pd
# Import the algorithms
import sys
sys.path.append('../../')
import Algorithms as alg
import matplotlib.pyplot as plt

# Define an experiment
from sacred import Experiment

ex = Experiment()


# Configuration
@ex.config
def experimental_setup():
    # Set up the experiment and define the range of system gain, lag and
    # delay as well as the filename etc.
    filename = 'SISO_06102017_TSum.csv'
    # Sample size per system order
    sample_size = 9000
    # Maximum system order (-1)
    max_order = 9
    # System noise in percent of gain
    noise_limit = 0
    # Gain limits
    gain_limits = [-10, 10]
    # Lag limits
    lag_limits = [.1, 1.]
    # Sum time constant
    TSum = 100
    # Delay limits -> if small, no delay
    delay_limits = [1, 10]
    # Create the system numerator -> gains
    K = np.random.uniform(gain_limits[0], gain_limits[1], (sample_size))
    Num = np.zeros((sample_size))
    # Create the system denominator -> lags
    T = np.random.uniform(lag_limits[0], lag_limits[1], (sample_size, max_order))
    Den = np.zeros((sample_size, max_order + 1))
    # Make a uniform TSum
    for samples in range(0, sample_size):
        # Compute the current order
        degree = int(1.0 * samples / sample_size * max_order) + 1
        # Compute the sum and distribute evenly
        dist = np.sort(T[samples, :degree])
        dist = TSum * dist / np.sum(dist)
        # Compute the poles from the distances
        dist = np.polynomial.polynomial.polyfromroots(-1. / dist)
        Den[samples, :(degree + 1)] = dist[::-1]
        # print(Den[samples, :])
        # Normalize the gain
        Num[samples] = K[samples] * Den[samples, degree]
    # Create the system delay
    L = np.random.uniform(delay_limits[0], delay_limits[1], (sample_size))
    # Create an array for the results:
    # sys_no, order, K, T, k, t, l, ms_real, ms_ideal, 4x t_rise, mp, t_settle, yss
    columns = ['Sample No.', 'Order', 'K', 'TDom', 'L', 'KM', 'TM', 'LM',
               'MS Real', 'MS Ideal', 'MT Real', 'MT Ideal']
    R = pd.DataFrame(columns=columns)


# Experimental study
@ex.automain
def experiment(Num, Den, L, R, noise_limit, sample_size, max_order, filename,
               columns):
    # Define system no
    sys_no = 0
    # Define an inner loop over samples
    for sample in range(0, sample_size):
        # Current order
        current_order = int(1.0 * sample / sample_size * max_order) + 1
        # Current system
        sys_no += 1
        # Get the denominator
        den = Den[sample, :current_order + 1]
        # Get the gain
        num = Num[sample]
        # Get the delay
        l = L[sample]
        # Define a transfer function with pole multiplicity
        G = cn.tf(num, den)
        # Add delay with Pade approximation of order 10
        num, den = cn.pade(l, 10)
        G = G * cn.tf(num, den)
        # Step response
        t = np.linspace(0, 1000, 10000)
        y, t = cn.step(G, t)
        # plt.plot(t, y)
        # plt.show()
        # Add noise
        if noise_limit > 1e-3:
            y = y + np.random.normal(0, noise_limit * Num[sample], y.size)
        u = np.ones_like(t)
        # Identify the system
        km, tm, lm = alg.Integral_Identification(y, u, t)
        # Make a model of the system
        num, den = cn.pade(lm, 10)
        GM = cn.tf([km], [tm, 1]) * cn.tf(num, den)
        # Tune AMIGO controller
        params, b = alg.AMIGO_Tune(km, tm, lm)
        # Define a controller with setpoint
        ky = cn.tf([params[0]], [1]) + cn.tf([params[1]], [1, 0])
        kr = cn.tf([b * params[0]], [1]) + cn.tf([params[1]], [1, 0])
        # REAL SYSTEM
        # Real system closed loop, setpoint weight
        real_clsw = cn.feedback(G, ky) * kr
        # Real system closed loop, without setpoint weight
        real_cl = cn.feedback(G * ky, 1)
        # Real system sensitivity
        real_sens = 1 / (1 + G * ky)
        # Real system complementary sensitivity
        real_comp = real_sens * G * kr
        # IDENTIFIED SYSTEM
        # Identified system closed loop, setpoint weight
        iden_clsw = cn.feedback(GM, ky) * kr
        # Identified system closed loop, without setpoint weight
        iden_cl = cn.feedback(GM * ky, 1)
        # Identified system sensitivity
        iden_sens = 1 / (1 + GM * ky)
        # Identified system complementary sensitivity
        iden_comp = iden_sens * GM * kr
        # Step responses
        y_rclsw, t_rclsw = cn.step(real_clsw)
        y_rcl, t_rcl = cn.step(real_cl)
        y_iclsw, t_iclsw = cn.step(iden_clsw)
        y_icl, t_icl = cn.step(iden_cl)
        # Compute the gain
        # Define frequency range
        omega = np.logspace(-5, 5, 1000)
        gain, phase, omega = cn.freqresp(real_sens, omega)
        MS_Real = np.max(gain)
        gain, phase, omega = cn.freqresp(real_comp, omega)
        MT_Real = np.max(gain)
        gain, phase, omega = cn.freqresp(iden_sens, omega)
        MS_Iden = np.max(gain)
        gain, phase, omega = cn.freqresp(iden_comp, omega)
        MT_Iden = np.max(gain)
        # Get the step information
        # Tr_RSW, Mp_RSW, Ts_RSW, Ys_RSW = alg.Step_Info(y_rclsw, t_rclsw)
        # Tr_R, Mp_R, Ts_R, Ys_R = alg.Step_Info(y_rcl, t_rcl)
        # Tr_ISW, Mp_ISW, Ts_ISW, Ys_ISW = alg.Step_Info(y_iclsw, t_iclsw)
        # Tr_I, Mp_I, Ts_I, Ys_I = alg.Step_Info(y_icl, t_icl)
        # Append data
        # if order == 1:
        R.loc[sys_no - 1] = [sys_no, current_order,
                             Num[sample] / Den[sample, current_order],
                             np.max(np.abs(np.real(
                                 np.polynomial.polynomial.polyroots(
                                     Den[sample, :current_order + 1])))),
                             L[sample], km, tm, lm,
                             MS_Real, MS_Iden, MT_Real, MT_Iden]
        # else:
        #     R.loc[sys_no - 1] = [sys_no, order, N[order][sample],
        #                          np.sum(D[order][sample][0:order - 1]),
        #                          km, tm, lm, MS_Real, MS_Iden,
        #                          Tr_RSW, Mp_RSW, Ts_RSW, Ys_RSW,
        #                          Tr_R, Mp_R, Ts_R, Ys_R,
        #                          Tr_ISW, Mp_ISW, Ts_ISW, Ys_ISW,
        #                          Tr_I, Mp_I, Ts_I, Ys_I]
        per = float(sample / sample_size) * 100
        sys.stdout.write("\r %f" % per)
        sys.stdout.flush()
    R.to_csv(filename, sep=';')
repo_name: AlCap23/Thesis
path: Python/Experiments/SISO/sisostudy_TSUM.py
language: Python
license: gpl-3.0
size: 5,574
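A small standalone check of the denominator construction used in experimental_setup above: three illustrative lags are rescaled so they sum to TSum, then converted to polynomial coefficients from the pole locations -1/T_i, reversed to highest-order-first as control.tf expects:

import numpy as np

TSum = 100.0
dist = np.sort(np.array([0.2, 0.3, 0.5]))
dist = TSum * dist / np.sum(dist)               # lags now sum to exactly 100
den = np.polynomial.polynomial.polyfromroots(-1.0 / dist)[::-1]
print(den)                                      # denominator coefficients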
import sys
sys.path.insert(1, "../../../")
import h2o, tests


def binop_pipe():
    iris = h2o.import_file(path=tests.locate("smalldata/iris/iris_wheader.csv"))
    rows, cols = iris.dim
    iris.show()

    # frame/scalar
    res = 5 | iris
    rows, cols = res.dim
    assert rows == rows and cols == cols, "dimension mismatch"

    res = iris | 1
    rows, cols = res.dim
    assert rows == rows and cols == cols, "dimension mismatch"

    # vec/vec
    res = iris[0] | iris[1]
    rows = len(res)
    assert rows == rows, "dimension mismatch"

    # vec/scalar
    res = iris[0] | 1
    rows = res.nrow
    assert rows == rows, "dimension mismatch"
    new_rows = iris[res].nrow
    assert new_rows == rows, "wrong number of rows returned"

    res = 1 | iris[1]
    rows = res.nrow
    assert rows == rows, "dimension mismatch"
    new_rows = iris[res].nrow
    assert new_rows == rows, "wrong number of rows returned"

    # frame/vec
    # try:
    #     res = iris | iris[0]
    #     res.show()
    #     assert False, "expected error. objects of different dimensions not supported."
    # except EnvironmentError:
    #     pass
    # try:
    #     res = iris[3] | iris
    #     res.show()
    #     assert False, "expected error. objects of different dimensions not supported."
    # except EnvironmentError:
    #     pass

    # frame/frame
    res = iris | iris
    rows, cols = res.dim
    assert rows == rows and cols == cols, "dimension mismatch"

    res = iris[0:2] | iris[1:3]
    rows, cols = res.dim
    assert rows == rows and cols == 2, "dimension mismatch"

    # try:
    #     res = iris | iris[0:3]
    #     res.show()
    #     assert False, "expected error. frames are different dimensions."
    # except EnvironmentError:
    #     pass


if __name__ == "__main__":
    tests.run_test(sys.argv, binop_pipe)
repo_name: kyoren/https-github.com-h2oai-h2o-3
path: h2o-py/tests/testdir_munging/binop/pyunit_binop2_pipe.py
language: Python
license: apache-2.0
size: 1,834
from flask import Blueprint

from my_app.hello.models import MESSAGES

hello = Blueprint('hello', __name__)


@hello.route('/')
@hello.route('/hello')
def hello_world():
    return MESSAGES['default']


@hello.route('/show/<key>')
def get_message(key):
    return MESSAGES.get(key) or "%s not found!" % key


@hello.route('/add/<key>/<message>')
def add_or_update_message(key, message):
    MESSAGES[key] = message
    return "%s Added/Updated" % key
repo_name: nikitabrazhnik/flask2
path: Module 2/Chapter01/my_app/hello/views.py
language: Python
license: mit
size: 450
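A blueprint only becomes reachable once it is registered on an application. A sketch of how this module would typically be wired up (the import path follows views.py above; the entry point itself is an assumption, since the app module is not shown):

from flask import Flask
from my_app.hello.views import hello

app = Flask(__name__)
app.register_blueprint(hello)   # mounts '/', '/hello', '/show/<key>', '/add/<key>/<message>'

if __name__ == '__main__':
    app.run(debug=True)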
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#

from __future__ import unicode_literals

from collections import Mapping

from pyLibrary.debugs.logs import Log
from pyLibrary.dot import wrap, set_default, split_field
from pyLibrary.dot.dicts import Dict
from pyLibrary.queries import containers

type2container = Dict()
config = Dict()   # config.default IS EXPECTED TO BE SET BEFORE CALLS ARE MADE
_ListContainer = None


def _delayed_imports():
    global type2container
    global _ListContainer

    from pyLibrary.queries.containers.lists import ListContainer as _ListContainer
    _ = _ListContainer

    try:
        from pyLibrary.queries.qb_usingMySQL import MySQL as _MySQL
    except Exception:
        _MySQL = None

    from pyLibrary.queries.qb_usingES import FromES as _FromES
    from pyLibrary.queries.meta import FromESMetadata as _FromESMetadata

    set_default(type2container, {
        "elasticsearch": _FromES,
        "mysql": _MySQL,
        "memory": None,
        "meta": _FromESMetadata
    })


def wrap_from(frum, schema=None):
    """
    :param frum:
    :param schema:
    :return:
    """
    if not type2container:
        _delayed_imports()

    frum = wrap(frum)

    if isinstance(frum, basestring):
        if not containers.config.default.settings:
            Log.error("expecting pyLibrary.queries.query.config.default.settings to contain default elasticsearch connection info")

        type_ = None
        index = frum
        if frum.startswith("meta."):
            from pyLibrary.queries.meta import FromESMetadata

            if frum == "meta.columns":
                return meta.singlton.columns
            elif frum == "meta.table":
                return meta.singlton.tables
            else:
                Log.error("{{name}} not a recognized table", name=frum)
        else:
            type_ = containers.config.default.type
            index = split_field(frum)[0]

        settings = set_default(
            {
                "index": index,
                "name": frum
            },
            containers.config.default.settings
        )
        settings.type = None
        return type2container[type_](settings)
    elif isinstance(frum, Mapping) and frum.type and type2container[frum.type]:
        # TODO: Ensure the frum.name is set, so we capture the deep queries
        if not frum.type:
            Log.error("Expecting from clause to have a 'type' property")
        return type2container[frum.type](frum.settings)
    elif isinstance(frum, Mapping) and (frum["from"] or isinstance(frum["from"], (list, set))):
        from pyLibrary.queries.query import Query
        return Query(frum, schema=schema)
    elif isinstance(frum, (list, set)):
        return _ListContainer("test_list", frum)
    else:
        return frum


import es09.util
repo_name: klahnakoski/MoDataSubmission
path: pyLibrary/queries/__init__.py
language: Python
license: mpl-2.0
size: 3,044
import random

import torch
from torch.autograd import Variable

from train_util import variable_from_sentence


class ModelPredictor(object):
    def __init__(self, encoder, decoder, input_lang, output_lang, max_length):
        self.encoder = encoder
        self.decoder = decoder
        self.input_lang = input_lang
        self.output_lang = output_lang
        self.max_length = max_length

    def evaluate(self, sentence):
        SOS_token = self.input_lang.word2index["SOS"]
        EOS_token = self.input_lang.word2index["EOS"]

        input_variable = variable_from_sentence(self.input_lang, sentence,
                                                self.max_length)
        input_length = input_variable.size()[0]
        encoder_hidden = self.encoder.init_hidden()

        encoder_outputs = Variable(torch.zeros(self.max_length,
                                               self.encoder.hidden_size))

        for ei in range(input_length):
            encoder_output, encoder_hidden = self.encoder(input_variable[ei],
                                                          encoder_hidden)
            encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]

        decoder_input = Variable(torch.LongTensor([[SOS_token]]))  # SOS
        decoder_hidden = encoder_hidden

        decoded_words = []

        for di in range(self.max_length):
            decoder_output, decoder_hidden, decoder_attention = self.decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.data.topk(1)
            ni = topi[0][0]
            if ni == EOS_token:
                break
            else:
                decoded_words.append(self.output_lang.index2word[ni])

            decoder_input = Variable(torch.LongTensor([[ni]]))

        return decoded_words

    def evaluate_randomly(self, pairs, n=10):
        match = 0
        for i in range(n):
            pair = random.choice(pairs)
            print('>', pair[0])
            print('=', pair[1])
            output_words = self.evaluate(pair[0])
            output_sentence = ' '.join(output_words)
            print('<', output_sentence)
            print('')
            if pair[1] == output_sentence:
                match += 1
        print("accuracy: ", (match / n) * 100, "%")

    def predict_sentence(self, sentence):
        return ' '.join(self.evaluate(sentence))
repo_name: Taekyoon/Pytorch_Seq2Seq_Tutorial
path: predict.py
language: Python
license: mit
size: 2,310
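The decoding loop in evaluate above is greedy: at each step it keeps only the top-1 token from the decoder output and feeds it back in as the next input. A self-contained illustration of that selection step with fake scores (an illustrative vocabulary size of 5):

import torch

decoder_output = torch.randn(1, 5)    # fake decoder scores over a 5-word vocabulary
topv, topi = decoder_output.topk(1)   # value and index of the best-scoring token
ni = topi[0][0]                       # greedy choice for the next input token
print(int(ni))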
from __future__ import division, print_function import numpy as np from itertools import product import warnings from scipy.sparse import csr_matrix from sklearn import datasets from sklearn import svm from sklearn.datasets import make_multilabel_classification from sklearn.random_projection import sparse_random_matrix from sklearn.utils.validation import check_array, check_consistent_length from sklearn.utils.validation import check_random_state from sklearn.utils.testing import assert_raises, clean_warning_registry from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_warns_message from sklearn.metrics import auc from sklearn.metrics import average_precision_score from sklearn.metrics import coverage_error from sklearn.metrics import label_ranking_average_precision_score from sklearn.metrics import precision_recall_curve from sklearn.metrics import label_ranking_loss from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve from sklearn.exceptions import UndefinedMetricWarning ############################################################################### # Utilities for testing def make_prediction(dataset=None, binary=False): """Make some classification predictions on a toy dataset using a SVC If binary is True restrict to a binary classification problem instead of a multiclass classification problem """ if dataset is None: # import some data to play with dataset = datasets.load_iris() X = dataset.data y = dataset.target if binary: # restrict to a binary classification task X, y = X[y < 2], y[y < 2] n_samples, n_features = X.shape p = np.arange(n_samples) rng = check_random_state(37) rng.shuffle(p) X, y = X[p], y[p] half = int(n_samples / 2) # add noisy features to make the problem harder and avoid perfect results rng = np.random.RandomState(0) X = np.c_[X, rng.randn(n_samples, 200 * n_features)] # run classifier, get class probabilities and label predictions clf = svm.SVC(kernel='linear', probability=True, random_state=0) probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:]) if binary: # only interested in probabilities of the positive case # XXX: do we really want a special API for the binary case? probas_pred = probas_pred[:, 1] y_pred = clf.predict(X[half:]) y_true = y[half:] return y_true, y_pred, probas_pred ############################################################################### # Tests def _auc(y_true, y_score): """Alternative implementation to check for correctness of `roc_auc_score`.""" pos_label = np.unique(y_true)[1] # Count the number of times positive samples are correctly ranked above # negative samples. pos = y_score[y_true == pos_label] neg = y_score[y_true != pos_label] diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1) n_correct = np.sum(diff_matrix > 0) return n_correct / float(len(pos) * len(neg)) def _average_precision(y_true, y_score): """Alternative implementation to check for correctness of `average_precision_score`. Note that this implementation fails on some edge cases. For example, for constant predictions e.g. [0.5, 0.5, 0.5], y_true = [1, 0, 0] returns an average precision of 0.33... but y_true = [0, 0, 1] returns 1.0. 
""" pos_label = np.unique(y_true)[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_score = y_score[order] y_true = y_true[order] score = 0 for i in range(len(y_score)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec return score / n_pos def _average_precision_slow(y_true, y_score): """A second alternative implementation of average precision that closely follows the Wikipedia article's definition (see References). This should give identical results as `average_precision_score` for all inputs. References ---------- .. [1] `Wikipedia entry for the Average precision <http://en.wikipedia.org/wiki/Average_precision>`_ """ precision, recall, threshold = precision_recall_curve(y_true, y_score) precision = list(reversed(precision)) recall = list(reversed(recall)) average_precision = 0 for i in range(1, len(precision)): average_precision += precision[i] * (recall[i] - recall[i - 1]) return average_precision def test_roc_curve(): # Test Area under Receiver Operating Characteristic (ROC) curve y_true, _, probas_pred = make_prediction(binary=True) expected_auc = _auc(y_true, probas_pred) for drop in [True, False]: fpr, tpr, thresholds = roc_curve(y_true, probas_pred, drop_intermediate=drop) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, expected_auc, decimal=2) assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred)) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_end_points(): # Make sure that roc_curve returns a curve start at 0 and ending and # 1 even in corner cases rng = np.random.RandomState(0) y_true = np.array([0] * 50 + [1] * 50) y_pred = rng.randint(3, size=100) fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True) assert_equal(fpr[0], 0) assert_equal(fpr[-1], 1) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thr.shape) def test_roc_returns_consistency(): # Test whether the returned threshold matches up with tpr # make small toy dataset y_true, _, probas_pred = make_prediction(binary=True) fpr, tpr, thresholds = roc_curve(y_true, probas_pred) # use the given thresholds to determine the tpr tpr_correct = [] for t in thresholds: tp = np.sum((probas_pred >= t) & y_true) p = np.sum(y_true) tpr_correct.append(1.0 * tp / p) # compare tpr and tpr_correct to see if the thresholds' order was correct assert_array_almost_equal(tpr, tpr_correct, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_multi(): # roc_curve not applicable for multi-class problems y_true, _, probas_pred = make_prediction(binary=False) assert_raises(ValueError, roc_curve, y_true, probas_pred) def test_roc_curve_confidence(): # roc_curve for confidence scores y_true, _, probas_pred = make_prediction(binary=True) fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.90, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_hard(): # roc_curve for hard decisions y_true, pred, probas_pred = make_prediction(binary=True) # always predict one trivial_pred = np.ones(y_true.shape) fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.50, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, 
thresholds.shape) # always predict zero trivial_pred = np.zeros(y_true.shape) fpr, tpr, thresholds = roc_curve(y_true, trivial_pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.50, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # hard decisions fpr, tpr, thresholds = roc_curve(y_true, pred) roc_auc = auc(fpr, tpr) assert_array_almost_equal(roc_auc, 0.78, decimal=2) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_one_label(): y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1] # assert there are warnings w = UndefinedMetricWarning fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred) # all true labels, all fpr should be nan assert_array_equal(fpr, np.nan * np.ones(len(thresholds))) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) # assert there are warnings fpr, tpr, thresholds = assert_warns(w, roc_curve, [1 - x for x in y_true], y_pred) # all negative labels, all tpr should be nan assert_array_equal(tpr, np.nan * np.ones(len(thresholds))) assert_equal(fpr.shape, tpr.shape) assert_equal(fpr.shape, thresholds.shape) def test_roc_curve_toydata(): # Binary classification y_true = [0, 1] y_score = [0, 1] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [1, 1]) assert_almost_equal(roc_auc, 1.) y_true = [0, 1] y_score = [1, 0] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1, 1]) assert_array_almost_equal(fpr, [0, 0, 1]) assert_almost_equal(roc_auc, 0.) y_true = [1, 0] y_score = [1, 1] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, 0.5) y_true = [1, 0] y_score = [1, 0] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [1, 1]) assert_almost_equal(roc_auc, 1.) y_true = [1, 0] y_score = [0.5, 0.5] tpr, fpr, _ = roc_curve(y_true, y_score) roc_auc = roc_auc_score(y_true, y_score) assert_array_almost_equal(tpr, [0, 1]) assert_array_almost_equal(fpr, [0, 1]) assert_almost_equal(roc_auc, .5) y_true = [0, 0] y_score = [0.25, 0.75] # assert UndefinedMetricWarning because of no positive sample in y_true tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score) assert_raises(ValueError, roc_auc_score, y_true, y_score) assert_array_almost_equal(tpr, [0., 0.5, 1.]) assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan]) y_true = [1, 1] y_score = [0.25, 0.75] # assert UndefinedMetricWarning because of no negative sample in y_true tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score) assert_raises(ValueError, roc_auc_score, y_true, y_score) assert_array_almost_equal(tpr, [np.nan, np.nan]) assert_array_almost_equal(fpr, [0.5, 1.]) # Multi-label classification task y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [0, 1]]) assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro") assert_raises(ValueError, roc_auc_score, y_true, y_score, average="weighted") assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.) 
y_true = np.array([[0, 1], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro") assert_raises(ValueError, roc_auc_score, y_true, y_score, average="weighted") assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0, 1], [1, 0]]) assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0) y_true = np.array([[1, 0], [0, 1]]) y_score = np.array([[0.5, 0.5], [0.5, 0.5]]) assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5) assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5) def test_roc_curve_drop_intermediate(): # Test that drop_intermediate drops the correct thresholds y_true = [0, 0, 0, 0, 1, 1] y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0] tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True) assert_array_almost_equal(thresholds, [1., 0.7, 0.]) # Test dropping thresholds with repeating scores y_true = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9, 0.6, 0.7, 0.8, 0.9, 0.9, 1.0] tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True) assert_array_almost_equal(thresholds, [1.0, 0.9, 0.7, 0.6, 0.]) def test_roc_curve_fpr_tpr_increasing(): # Ensure that fpr and tpr returned by roc_curve are increasing. # Construct an edge case with float y_score and sample_weight # when some adjacent values of fpr and tpr are actually the same. y_true = [0, 0, 1, 1, 1] y_score = [0.1, 0.7, 0.3, 0.4, 0.5] sample_weight = np.repeat(0.2, 5) fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight) assert_equal((np.diff(fpr) < 0).sum(), 0) assert_equal((np.diff(tpr) < 0).sum(), 0) def test_auc(): # Test Area Under Curve (AUC) computation x = [0, 1] y = [0, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [1, 0] y = [0, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [1, 0, 0] y = [0, 1, 1] assert_array_almost_equal(auc(x, y), 0.5) x = [0, 1] y = [1, 1] assert_array_almost_equal(auc(x, y), 1) x = [0, 0.5, 1] y = [0, 0.5, 1] assert_array_almost_equal(auc(x, y), 0.5) def test_auc_duplicate_values(): # Test Area Under Curve (AUC) computation with duplicate values # auc() was previously sorting the x and y arrays according to the indices # from numpy.argsort(x), which was reordering the tied 0's in this example # and resulting in an incorrect area computation. This test detects the # error. 
x = [-2.0, 0.0, 0.0, 0.0, 1.0] y1 = [2.0, 0.0, 0.5, 1.0, 1.0] y2 = [2.0, 1.0, 0.0, 0.5, 1.0] y3 = [2.0, 1.0, 0.5, 0.0, 1.0] for y in (y1, y2, y3): assert_array_almost_equal(auc(x, y, reorder=True), 3.0) def test_auc_errors(): # Incompatible shapes assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2]) # Too few x values assert_raises(ValueError, auc, [0.0], [0.1]) # x is not in order x = [2, 1, 3, 4] y = [5, 6, 7, 8] error_message = ("x is neither increasing nor decreasing : " "{}".format(np.array(x))) assert_raise_message(ValueError, error_message, auc, x, y) def test_deprecated_auc_reorder(): depr_message = ("The 'reorder' parameter has been deprecated in version " "0.20 and will be removed in 0.22. It is recommended not " "to set 'reorder' and ensure that x is monotonic " "increasing or monotonic decreasing.") assert_warns_message(DeprecationWarning, depr_message, auc, [1, 2], [2, 3], reorder=True) def test_auc_score_non_binary_class(): # Test that roc_auc_score function returns an error when trying # to compute AUC for non-binary class values. rng = check_random_state(404) y_pred = rng.rand(10) # y_true contains only one class value y_true = np.zeros(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = -np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) # y_true contains three different class values y_true = rng.randint(0, 3, size=10) assert_raise_message(ValueError, "multiclass format is not supported", roc_auc_score, y_true, y_pred) clean_warning_registry() with warnings.catch_warnings(record=True): rng = check_random_state(404) y_pred = rng.rand(10) # y_true contains only one class value y_true = np.zeros(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) y_true = -np.ones(10, dtype="int") assert_raise_message(ValueError, "ROC AUC score is not defined", roc_auc_score, y_true, y_pred) # y_true contains three different class values y_true = rng.randint(0, 3, size=10) assert_raise_message(ValueError, "multiclass format is not supported", roc_auc_score, y_true, y_pred) def test_binary_clf_curve(): rng = check_random_state(404) y_true = rng.randint(0, 3, size=10) y_pred = rng.rand(10) msg = "multiclass format is not supported" assert_raise_message(ValueError, msg, precision_recall_curve, y_true, y_pred) def test_precision_recall_curve(): y_true, _, probas_pred = make_prediction(binary=True) _test_precision_recall_curve(y_true, probas_pred) # Use {-1, 1} for labels; make sure original labels aren't modified y_true[np.where(y_true == 0)] = -1 y_true_copy = y_true.copy() _test_precision_recall_curve(y_true, probas_pred) assert_array_equal(y_true_copy, y_true) labels = [1, 0, 0, 1] predict_probas = [1, 2, 3, 4] p, r, t = precision_recall_curve(labels, predict_probas) assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.])) assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.])) assert_array_almost_equal(t, np.array([1, 2, 3, 4])) assert_equal(p.size, r.size) assert_equal(p.size, t.size + 1) def test_precision_recall_curve_pos_label(): y_true, _, probas_pred = make_prediction(binary=False) pos_label = 2 p, r, 
thresholds = precision_recall_curve(y_true,
                                              probas_pred[:, pos_label],
                                              pos_label=pos_label)
    p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
                                                 probas_pred[:, pos_label])
    assert_array_almost_equal(p, p2)
    assert_array_almost_equal(r, r2)
    assert_array_almost_equal(thresholds, thresholds2)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)


def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
    p, r, thresholds = precision_recall_curve(y_true, probas_pred)
    precision_recall_auc = _average_precision_slow(y_true, probas_pred)
    assert_array_almost_equal(precision_recall_auc, 0.859, 3)
    assert_array_almost_equal(precision_recall_auc,
                              average_precision_score(y_true, probas_pred))
    assert_almost_equal(_average_precision(y_true, probas_pred),
                        precision_recall_auc, decimal=3)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)
    # Smoke test in the case of proba having only one value
    p, r, thresholds = precision_recall_curve(y_true,
                                              np.zeros_like(probas_pred))
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)


def test_precision_recall_curve_errors():
    # Contains non-binary labels
    assert_raises(ValueError, precision_recall_curve,
                  [0, 1, 2], [[0.0], [1.0], [1.0]])


def test_precision_recall_curve_toydata():
    with np.errstate(all="raise"):
        # Binary classification
        y_true = [0, 1]
        y_score = [0, 1]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [1, 1])
        assert_array_almost_equal(r, [1, 0])
        assert_almost_equal(auc_prc, 1.)

        y_true = [0, 1]
        y_score = [1, 0]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 0., 1.])
        assert_array_almost_equal(r, [1., 0., 0.])
        # Here we are doing a terrible prediction: we are always getting
        # it wrong, hence the average_precision_score is the accuracy at
        # chance: 50%
        assert_almost_equal(auc_prc, 0.5)

        y_true = [1, 0]
        y_score = [1, 1]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 1])
        assert_array_almost_equal(r, [1., 0])
        assert_almost_equal(auc_prc, .5)

        y_true = [1, 0]
        y_score = [1, 0]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [1, 1])
        assert_array_almost_equal(r, [1, 0])
        assert_almost_equal(auc_prc, 1.)

        y_true = [1, 0]
        y_score = [0.5, 0.5]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 1])
        assert_array_almost_equal(r, [1, 0.])
        assert_almost_equal(auc_prc, .5)

        y_true = [0, 0]
        y_score = [0.25, 0.75]
        assert_raises(Exception, precision_recall_curve, y_true, y_score)
        assert_raises(Exception, average_precision_score, y_true, y_score)

        y_true = [1, 1]
        y_score = [0.25, 0.75]
        p, r, _ = precision_recall_curve(y_true, y_score)
        assert_almost_equal(average_precision_score(y_true, y_score), 1.)
        assert_array_almost_equal(p, [1., 1., 1.])
        assert_array_almost_equal(r, [1, 0.5, 0.])

        # Multi-label classification task
        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [0, 1]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 1.)

        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.5)

        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="macro"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="weighted"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.5)

        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="macro"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="weighted"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.5)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.5)


def test_average_precision_constant_values():
    # Check that the average_precision_score of a constant predictor is
    # the fraction of positive labels

    # Generate a dataset with 25% of positives
    y_true = np.zeros(100, dtype=int)
    y_true[::4] = 1
    # And a constant score
    y_score = np.ones(100)
    # The precision is then the fraction of positives, whatever the
    # recall is, as there is only one threshold:
    assert_equal(average_precision_score(y_true, y_score), .25)


def test_score_scale_invariance():
    # Test that average_precision_score and roc_auc_score are invariant
    # under the scaling or shifting of probabilities
    # This test was expanded (added scaled_down) in response to github
    # issue #3864 (and others), where overly aggressive rounding was causing
    # problems for users with very small y_score values
    y_true, _, probas_pred = make_prediction(binary=True)

    roc_auc = roc_auc_score(y_true, probas_pred)
    roc_auc_scaled_up = roc_auc_score(y_true, 100 * probas_pred)
    roc_auc_scaled_down = roc_auc_score(y_true, 1e-6 * probas_pred)
    roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
    assert_equal(roc_auc, roc_auc_scaled_up)
    assert_equal(roc_auc, roc_auc_scaled_down)
    assert_equal(roc_auc, roc_auc_shifted)

    pr_auc = average_precision_score(y_true, probas_pred)
    pr_auc_scaled_up = average_precision_score(y_true, 100 * probas_pred)
    pr_auc_scaled_down = average_precision_score(y_true, 1e-6 * probas_pred)
    pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
    assert_equal(pr_auc, pr_auc_scaled_up)
    assert_equal(pr_auc, pr_auc_scaled_down)
    assert_equal(pr_auc, pr_auc_shifted)


def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
    assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
    assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)

    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
                        (2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]), (2 / 3 + 1 / 2) / 2) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]), (1 / 2 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]), (1 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]), (1 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2) assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]), (1 / 2 + 2 / 3) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1) assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1) # Tie handling assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1) assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5) assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1) assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3) assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]), (2 / 3 + 1 / 2) / 2) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]), (2 / 3 + 1 / 2) / 2) assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1) assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3) assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]), 3 / 4) def check_zero_or_all_relevant_labels(lrap_score): random_state = check_random_state(0) for n_labels in range(2, 5): y_score = random_state.uniform(size=(1, n_labels)) y_score_ties = np.zeros_like(y_score) # No relevant labels y_true = np.zeros((1, n_labels)) assert_equal(lrap_score(y_true, y_score), 1.) assert_equal(lrap_score(y_true, y_score_ties), 1.) # Only relevant labels y_true = np.ones((1, n_labels)) assert_equal(lrap_score(y_true, y_score), 1.) assert_equal(lrap_score(y_true, y_score_ties), 1.) # Degenerate case: only one label assert_almost_equal(lrap_score([[1], [0], [1], [0]], [[0.5], [0.5], [0.5], [0.5]]), 1.) 
def check_lrap_error_raised(lrap_score):
    # Raise a ValueError if the input format is not appropriate
    assert_raises(ValueError, lrap_score,
                  [0, 1, 0], [0.25, 0.3, 0.2])
    assert_raises(ValueError, lrap_score, [0, 1, 2],
                  [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    assert_raises(ValueError, lrap_score, [(0), (1), (2)],
                  [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])

    # Check that y_true.shape != y_score.shape raises the proper exception
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
    assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])


def check_lrap_only_ties(lrap_score):
    # Check tie handling in score
    # Basic check with only ties and increasing label space
    for n_labels in range(2, 10):
        y_score = np.ones((1, n_labels))

        # Check for growing number of consecutive relevant labels
        for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
            for pos in range(n_labels - n_relevant):
                y_true = np.zeros((1, n_labels))
                y_true[0, pos:pos + n_relevant] = 1
                assert_almost_equal(lrap_score(y_true, y_score),
                                    n_relevant / n_labels)


def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various inputs
    # Basic check with increasing label space size and decreasing score
    for n_labels in range(2, 10):
        y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)

        # First and last
        y_true = np.zeros((1, n_labels))
        y_true[0, 0] = 1
        y_true[0, -1] = 1
        assert_almost_equal(lrap_score(y_true, y_score),
                            (2 / n_labels + 1) / 2)

        # Check for growing number of consecutive relevant labels
        for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
            for pos in range(n_labels - n_relevant):
                y_true = np.zeros((1, n_labels))
                y_true[0, pos:pos + n_relevant] = 1
                assert_almost_equal(lrap_score(y_true, y_score),
                                    sum((r + 1) / ((pos + r + 1) *
                                                   n_relevant)
                                        for r in range(n_relevant)))


def _my_lrap(y_true, y_score):
    """Simple implementation of label ranking average precision"""
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true)
    y_score = check_array(y_score)
    n_samples, n_labels = y_true.shape
    score = np.empty((n_samples, ))
    for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
        unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
        n_ranks = unique_rank.size
        rank = n_ranks - inv_rank

        # Ranks need to be corrected to take ties into account,
        # e.g. a two-way tie for rank 1 means that both labels get rank 2.
        corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
        rank = corr_rank[rank]

        relevant = y_true[i].nonzero()[0]
        if relevant.size == 0 or relevant.size == n_labels:
            score[i] = 1
            continue

        score[i] = 0.
        for label in relevant:
            # Let's count the number of relevant labels with a better rank
            # (smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant) # Weight by the rank of the actual label score[i] += n_ranked_above / rank[label] score[i] /= relevant.size return score.mean() def check_alternative_lrap_implementation(lrap_score, n_classes=5, n_samples=20, random_state=0): _, y_true = make_multilabel_classification(n_features=1, allow_unlabeled=False, random_state=random_state, n_classes=n_classes, n_samples=n_samples) # Score with ties y_score = sparse_random_matrix(n_components=y_true.shape[0], n_features=y_true.shape[1], random_state=random_state) if hasattr(y_score, "toarray"): y_score = y_score.toarray() score_lrap = label_ranking_average_precision_score(y_true, y_score) score_my_lrap = _my_lrap(y_true, y_score) assert_almost_equal(score_lrap, score_my_lrap) # Uniform score random_state = check_random_state(random_state) y_score = random_state.uniform(size=(n_samples, n_classes)) score_lrap = label_ranking_average_precision_score(y_true, y_score) score_my_lrap = _my_lrap(y_true, y_score) assert_almost_equal(score_lrap, score_my_lrap) def test_label_ranking_avp(): for fn in [label_ranking_average_precision_score, _my_lrap]: yield check_lrap_toy, fn yield check_lrap_without_tie_and_increasing_score, fn yield check_lrap_only_ties, fn yield check_zero_or_all_relevant_labels, fn yield check_lrap_error_raised, label_ranking_average_precision_score for n_samples, n_classes, random_state in product((1, 2, 8, 20), (2, 5, 10), range(1)): yield (check_alternative_lrap_implementation, label_ranking_average_precision_score, n_classes, n_samples, random_state) def test_coverage_error(): # Toy case assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1) assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3) assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3) assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3) assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0) assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3) assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1) assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2) assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3) assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 
0.75, 0.25]]), 2)
    assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)

    # Non-trivial case
    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
                                       [[0.1, 10., -3], [0, 1, 3]]),
                        (1 + 3) / 2.)

    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
                                       [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
                        (1 + 3 + 3) / 3.)

    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
                                       [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
                        (1 + 3 + 3) / 3.)


def test_coverage_tie_handling():
    assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
    assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)

    assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
    assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
    assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)


def test_label_ranking_loss():
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)

    assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
                        0)
    assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
                        2 / 2)
    assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
                        2 / 2)

    # Undefined metrics - the ranking doesn't matter
    assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
    assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)

    assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
                        0)
    assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]),
                        0)

    # Non-trivial case
    assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
                                           [[0.1, 10., -3], [0, 1, 3]]),
                        (0 + 2 / 2) / 2.)

    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)

    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)

    # Sparse csr matrices
    assert_almost_equal(label_ranking_loss(
        csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
        [[0.1, 10, -3], [3, 1, 3]]),
        (0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
    assert_raises(ValueError, label_ranking_loss,
                  [[0, 1], [0, 1]], [[0], [1]])

    assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, label_ranking_loss,
                  [[0], [1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, label_ranking_loss,
                  [[0, 1], [0, 1]], [[0], [1]])


def test_ranking_loss_ties_handling():
    # Tie handling
    assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
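
# Illustrative note (a sketch added for clarity, not part of the original
# test suite): the tie correction in _my_lrap maps every tied score to the
# worst rank in its tie group. For y_score = [0.25, 0.5, 0.5]:
#
#   np.unique -> unique_rank = [0.25, 0.5], inv_rank = [0, 1, 1]
#   rank = n_ranks - inv_rank = [2, 1, 1]
#   corr_rank = np.bincount(rank, minlength=3).cumsum() = [0, 2, 3]
#   rank = corr_rank[rank] = [3, 2, 2]
#
# so both tied labels share rank 2. With y_true = [1, 1, 0] this yields
# (2 / 3 + 1 / 2) / 2, matching the corresponding assertion in
# check_lrap_toy above.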
zorroblue/scikit-learn
sklearn/metrics/tests/test_ranking.py
Python
bsd-3-clause
44,265
"""Token system The capture gui application will format tokens in the filename. The tokens can be registered using `register_token` """ from . import lib _registered_tokens = dict() def format_tokens(string, options): """ Replace the tokens with the correlated strings :param string: the filename of the playblast :type string: str :param options: the capture options :type options: dict :return: the formatted filename with all tokens resolved :rtype: str """ if not string: return string for token, value in _registered_tokens.items(): if token in string: func = value['func'] string = string.replace(token, func(options)) return string def register_token(token, func, label=""): assert token.startswith("<") and token.endswith(">") assert callable(func) _registered_tokens[token] = {"func": func, "label": label} def list_tokens(): return _registered_tokens.copy() # register default tokens # scene based tokens def _camera_token(options): """Return short name of camera from capture options""" camera = options['camera'] camera = camera.rsplit("|", 1)[-1] # use short name camera = camera.replace(":", "_") # namespace `:` to `_` return camera register_token("<Camera>", _camera_token, label="Insert camera name") register_token("<Scene>", lambda options: lib.get_current_scenename() or "playblast", label="Insert current scene name") register_token("<RenderLayer>", lambda options: lib.get_current_renderlayer(), label="Insert active render layer name") # project based tokens register_token("<Images>", lambda options: lib.get_project_rule("images"), label="Insert image directory of set project") register_token("<Movies>", lambda options: lib.get_project_rule("movie"), label="Insert movies directory of set project")
Colorbleed/maya-capture-gui
capture_gui/tokens.py
Python
mit
1,977
#
# Lecture 3
# Autoencoders - fully connected model
#

#import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from libs.utils import montage
from libs import gif
import datetime

# dja
#np.set_printoptions(threshold=np.inf) # display FULL array (infinite)
plt.ion()
TID=datetime.date.today().strftime("%Y%m%d")+"_"+datetime.datetime.now().time().strftime("%H%M%S")

from libs.datasets import MNIST
ds = MNIST()
print("ds.X.shape: ", ds.X.shape)

plt.imshow(ds.X[0].reshape((28, 28)))

# Let's get the first 1000 images of the dataset and reshape them
imgs = ds.X[:1000].reshape((-1, 28, 28))
# Then create a montage and draw the montage
plt.imshow(montage(imgs), cmap='gray')
plt.pause(1)

# Take the mean across all images
mean_img = np.mean(ds.X, axis=0)
# Then plot the mean image.
#plt.figure()
plt.imshow(mean_img.reshape((28, 28)), cmap='gray')
plt.title("mean")
plt.pause(1)

# Take the std across all images
std_img = np.std(ds.X, axis=0)
# Then plot the std image.
#plt.figure()
plt.imshow(std_img.reshape((28, 28)))
plt.title("std dev")
plt.pause(1)

#
# CREATE THE NETWORK
#

#
# 1 - Encoder
#

dimensions = [512, 256, 128, 64]

# So the number of features is the second dimension of our inputs matrix, 784
n_features = ds.X.shape[1]
# And we'll create a placeholder in the tensorflow graph that will be able
# to get any number of n_feature inputs.
X = tf.placeholder(tf.float32, [None, n_features])

# let's first copy our X placeholder to the name current_input
current_input = X
n_input = n_features

# We're going to keep every matrix we create so let's create a list to hold them all
Ws = []

# We'll create a for loop to create each layer:
for layer_i, n_output in enumerate(dimensions):

    # just like in the last session,
    # we'll use a variable scope to help encapsulate our variables
    # This will simply prefix all the variables made in this scope
    # with the name we give it.
    with tf.variable_scope("encoder/layer/{}".format(layer_i)):

        # Create a weight matrix which will increasingly reduce
        # down the amount of information in the input by performing
        # a matrix multiplication
        W = tf.get_variable(
            name='W',
            shape=[n_input, n_output],
            initializer=tf.random_normal_initializer(mean=0.0, stddev=0.02))

        # Now we'll multiply our input by our newly created W matrix
        # (note: this simple model does not add a bias term)
        h = tf.matmul(current_input, W)

        # And then use a relu activation function on its output
        current_input = tf.nn.relu(h)

        # Finally we'll store the weight matrix so we can build the decoder.
        Ws.append(W)

        # We'll also replace n_input with the current n_output, so that on the
        # next iteration, our new number of inputs will be correct.
        n_input = n_output

print("current input shape: ", current_input.get_shape())

#
# 2 - Decoder
#

# We'll first reverse the order of our weight matrices
Ws = Ws[::-1]

# then reverse the order of our dimensions,
# appending the last layer's number of inputs.
dimensions = dimensions[::-1][1:] + [ds.X.shape[1]]
print("dimensions: ", dimensions)

for layer_i, n_output in enumerate(dimensions):
    # we'll use a variable scope again to help encapsulate our variables
    # This will simply prefix all the variables made in this scope
    # with the name we give it.
with tf.variable_scope("decoder/layer/{}".format(layer_i)): # Now we'll grab the weight matrix we created before and transpose it # So a 3072 x 784 matrix would become 784 x 3072 # or a 256 x 64 matrix, would become 64 x 256 W = tf.transpose(Ws[layer_i]) # Now we'll multiply our input by our transposed W matrix h = tf.matmul(current_input, W) # And then use a relu activation function on its output current_input = tf.nn.relu(h) # We'll also replace n_input with the current n_output, so that on the # next iteration, our new number inputs will be correct. n_input = n_output Y = current_input # We'll first measure the average difference across every pixel cost = tf.reduce_mean(tf.squared_difference(X, Y), 1) print("cost shape: ", cost.get_shape()) cost = tf.reduce_mean(cost) learning_rate = 0.001 optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) # dja sess = tf.Session() #pend#sess = tf.Session(tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)) sess.run(tf.initialize_all_variables()) print("Training...") t1 = datetime.datetime.now() # Some parameters for training batch_size = 100 n_epochs = 10 # We'll try to reconstruct the same first 100 images and show how # The network does over the course of training. examples = ds.X[:100] # We'll store the reconstructions in a list imgs = [] #fig, ax = plt.subplots(1, 1) for epoch_i in range(n_epochs): for batch_X, _ in ds.train.next_batch(): sess.run(optimizer, feed_dict={X: batch_X - mean_img}) recon = sess.run(Y, feed_dict={X: examples - mean_img}) recon = np.clip((recon + mean_img).reshape((-1, 28, 28)), 0, 255) print("epoch: ", epoch_i, " cost: ", sess.run(cost, feed_dict={X: batch_X - mean_img})) img_i = montage(recon).astype(np.uint8) imgs.append(img_i) #ax.imshow(img_i, cmap='gray') plt.imshow(img_i, cmap='gray') plt.title("epoch "+str(epoch_i)) plt.pause(1) #fig.canvas.draw() t2 = datetime.datetime.now() delta = t2 - t1 print(" Total training time: ", delta.total_seconds()) gif.build_gif(imgs, saveto='lecture3_fullyconnectedmodel_'+TID+'.gif', cmap='gray', interval=0.3, show_gif=False) plt.pause(10) input("press enter...") plt.close() # eop
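
# Illustrative note (a sketch, not part of the original lecture script):
# because the decoder reuses tf.transpose(W) for each encoder matrix, the
# weights are "tied" -- the 784 x 512 encoder matrix doubles as the
# 512 x 784 decoder matrix and no extra decoder variables are created.
# To inspect the 64-dimensional latent codes, one could keep a handle on
# the encoder's final output tensor right after the encoder loop, e.g.:
#
#   latent = current_input   # placed just after the encoder loop above
#   codes = sess.run(latent, feed_dict={X: examples - mean_img})
#
# (the `latent` handle is an addition for illustration and does not exist
# in the original code)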
dariox2/CADL
session-3/l3b-autoencoder-fullyconnected.py
Python
apache-2.0
5,832
from flask import render_template_string, render_template, request
from urllib.parse import unquote
from collections import defaultdict
from .utils import drop_start, cache_filename
from .language import get_language_label
from .wikidata_api import QueryError, QueryTimeout, get_entity, get_entities
from . import user_agent_headers, overpass, mail, language, match, matcher, commons
from time import time
import requests
import requests.exceptions
import os
import json
import re

report_missing_values = False
wd_entity = 'http://www.wikidata.org/entity/Q'
enwiki = 'https://en.wikipedia.org/wiki/'
skip_tags = {'route:road',
             'route=road',
             'highway=primary',
             'highway=road',
             'highway=service',
             'highway=motorway',
             'highway=trunk',
             'highway=unclassified',
             'highway',
             'landuse',
             'name',
             'website',
             'addr:street',
             'type=associatedStreet',
             'type=waterway',
             'waterway=river'}

edu = ['Tag:amenity=college', 'Tag:amenity=university', 'Tag:amenity=school',
       'Tag:office=educational_institution']

tall = ['Key:height', 'Key:building:levels']

extra_keys = {
    'Q3914': ['Tag:building=school',
              'Tag:building=college',
              'Tag:amenity=college',
              'Tag:office=educational_institution'],  # school
    'Q322563': edu,     # vocational school
    'Q383092': edu,     # film school
    'Q1021290': edu,    # music school
    'Q1244442': edu,    # school building
    'Q1469420': edu,    # adult education centre
    'Q2143781': edu,    # drama school
    'Q2385804': edu,    # educational institution
    'Q5167149': edu,    # cooking school
    'Q7894959': edu,    # University Technical College
    'Q47530379': edu,   # agricultural college
    'Q11303': tall,     # skyscraper
    'Q18142': tall,     # high-rise building
    'Q33673393': tall,  # multi-storey building
    'Q641226': ['Tag:leisure=stadium'],   # arena
    'Q2301048': ['Tag:aeroway=helipad'],  # special airfield
    'Q622425': ['Tag:amenity=pub',
                'Tag:amenity=music_venue'],  # nightclub
    'Q187456': ['Tag:amenity=pub',
                'Tag:amenity=nightclub'],    # bar
    'Q16917': ['Tag:amenity=clinic',
               'Tag:building=clinic'],       # hospital
    'Q330284': ['Tag:amenity=market'],       # marketplace
    'Q5307737': ['Tag:amenity=pub',
                 'Tag:amenity=bar'],         # drinking establishment
    'Q875157': ['Tag:tourism=resort'],       # resort
    'Q174782': ['Tag:leisure=park',
                'Tag:highway=pedestrian',
                'Tag:foot=yes',
                'Tag:area=yes',
                'Tag:amenity=market',
                'Tag:leisure=common'],       # square
    'Q34627': ['Tag:religion=jewish'],       # synagogue
    'Q16970': ['Tag:religion=christian'],    # church
    'Q32815': ['Tag:religion=islam'],        # mosque
    'Q811979': ['Key:building'],             # architectural structure
    'Q11691': ['Key:building'],              # stock exchange
    'Q1329623': ['Tag:amenity=arts_centre',  # cultural centre
                 'Tag:amenity=community_centre'],
    'Q856584': ['Tag:amenity=library'],      # library building
    'Q11315': ['Tag:landuse=retail'],        # shopping mall
    'Q39658032': ['Tag:landuse=retail'],     # open air shopping centre
    'Q277760': ['Tag:historic=folly',
                'Tag:historic=city_gate'],   # gatehouse
    'Q180174': ['Tag:historic=folly'],       # folly
    'Q15243209': ['Tag:leisure=park',
                  'Tag:boundary=national_park'],  # historic district
    'Q3010369': ['Tag:historic=monument'],   # opening ceremony
    'Q123705': ['Tag:place=suburb'],         # neighbourhood
    'Q256020': ['Tag:amenity=pub'],          # inn
    'Q41253': ['Tag:amenity=theatre'],       # movie theater
    'Q17350442': ['Tag:amenity=theatre'],    # venue
    'Q156362': ['Tag:amenity=winery'],       # winery
    'Q14092': ['Tag:leisure=fitness_centre',
               'Tag:leisure=sports_centre'],  # gymnasium
    'Q27686': ['Tag:tourism=hostel',          # hotel
               'Tag:tourism=guest_house',
               'Tag:building=hotel'],
    'Q11707': ['Tag:amenity=cafe',
               'Tag:amenity=fast_food',
               'Tag:shop=deli',
               'Tag:shop=bakery',
               'Key:cuisine'],                # restaurant
'Q2360219': ['Tag:amenity=embassy'], # permanent mission 'Q27995042': ['Tag:protection_title=Wilderness Area'], # wilderness area 'Q838948': ['Tag:historic=memorial', 'Tag:historic=monument'], # work of art 'Q23413': ['Tag:place=locality'], # castle 'Q28045079': ['Tag:historic=archaeological_site', 'Tag:site_type=fortification', 'Tag:embankment=yes'], # contour fort 'Q744099': ['Tag:historic=archaeological_site', 'Tag:site_type=fortification', 'Tag:embankment=yes'], # hillfort 'Q515': ['Tag:border_type=city'], # city 'Q1254933': ['Tag:amenity=university'], # astronomical observatory 'Q1976594': ['Tag:landuse=industrial'], # science park 'Q190928': ['Tag:landuse=industrial'], # shipyard 'Q4663385': ['Tag:historic=train_station', # former railway station 'Tag:railway=historic_station'], 'Q11997323': ['Tag:emergency=lifeboat_station'], # lifeboat station 'Q16884952': ['Tag:castle_type=stately', 'Tag:building=country_house'], # country house 'Q1343246': ['Tag:castle_type=stately', 'Tag:building=country_house'], # English country house 'Q4919932': ['Tag:castle_type=stately'], # stately home 'Q1763828': ['Tag:amenity=community_centre'], # multi-purpose hall 'Q3469910': ['Tag:amenity=community_centre'], # performing arts center 'Q57660343': ['Tag:amenity=community_centre'], # performing arts building 'Q163740': ['Tag:amenity=community_centre', # nonprofit organization 'Tag:amenity=social_facility', 'Key:social_facility'], 'Q41176': ['Key:building:levels'], # building 'Q44494': ['Tag:historic=mill'], # mill 'Q56822897': ['Tag:historic=mill'], # mill building 'Q2175765': ['Tag:public_transport=stop_area'], # tram stop 'Q179700': ['Tag:memorial=statue', # statue 'Tag:memorial:type=statue', 'Tag:historic=memorial'], 'Q1076486': ['Tag:landuse=recreation_ground'], # sports venue 'Q988108': ['Tag:amenity=community_centre', # club 'Tag:community_centre=club_home'], 'Q55004558': ['Tag:service=yard', 'Tag:landuse=railway'], # car barn 'Q19563580': ['Tag:landuse=railway'], # rail yard 'Q134447': ['Tag:generator:source=nuclear'], # nuclear power plant 'Q1258086': ['Tag:leisure=park', 'Tag:boundary=national_park'], # National Historic Site 'Q32350958': ['Tag:leisure=bingo'], # Bingo hall 'Q53060': ['Tag:historic=gate', # gate 'Tag:tourism=attraction'], 'Q3947': ['Tag:tourism=hotel', # house 'Tag:building=hotel', 'Tag:tourism=guest_house'], 'Q847017': ['Tag:leisure=sports_centre'], # sports club 'Q820477': ['Tag:landuse=quarry', 'Tag:gnis:feature_type=Mine'], # mine 'Q77115': ['Tag:leisure=sports_centre'], # community center 'Q35535': ['Tag:amenity=police'], # police 'Q16560': ['Tag:tourism=attraction', # palace 'Tag:historic=yes'], 'Q131734': ['Tag:amenity=pub', # brewery 'Tag:industrial=brewery'], 'Q828909': ['Tag:landuse=commercial', 'Tag:landuse=industrial', 'Tag:historic=dockyard'], # wharf 'Q10283556': ['Tag:landuse=railway'], # motive power depot 'Q18674739': ['Tag:leisure=stadium'], # event venue 'Q20672229': ['Tag:historic=archaeological_site'], # friary 'Q207694': ['Tag:museum=art'], # art museum 'Q22698': ['Tag:leisure=dog_park', 'Tag:amenity=market', 'Tag:place=square', 'Tag:leisure=common'], # park 'Q738570': ['Tag:place=suburb'], # central business district 'Q1133961': ['Tag:place=suburb'], # commercial district 'Q935277': ['Tag:gnis:ftype=Playa', 'Tag:natural=sand'], # salt pan 'Q14253637': ['Tag:gnis:ftype=Playa', 'Tag:natural=sand'], # dry lake 'Q63099748': ['Tag:tourism=hotel', # hotel building 'Tag:building=hotel', 'Tag:tourism=guest_house'], 'Q2997369': ['Tag:leisure=park', 
'Tag:highway=pedestrian', 'Tag:foot=yes', 'Tag:area=yes', 'Tag:amenity=market', 'Tag:leisure=common'], # plaza 'Q130003': ['Tag:landuse=winter_sports', # ski resort 'Tag:site=piste', 'Tag:leisure=resort', 'Tag:landuse=recreation_ground'], } # search for items in bounding box that have an English Wikipedia article wikidata_enwiki_query = ''' SELECT ?place ?placeLabel (SAMPLE(?location) AS ?location) ?article WHERE { SERVICE wikibase:box { ?place wdt:P625 ?location . bd:serviceParam wikibase:cornerWest "Point({{ west }} {{ south }})"^^geo:wktLiteral . bd:serviceParam wikibase:cornerEast "Point({{ east }} {{ north }})"^^geo:wktLiteral . } ?article schema:about ?place . ?article schema:inLanguage "en" . ?article schema:isPartOf <https://en.wikipedia.org/> . FILTER NOT EXISTS { ?place wdt:P31 wd:Q18340550 } . # ignore timeline article FILTER NOT EXISTS { ?place wdt:P31 wd:Q13406463 } . # ignore list article FILTER NOT EXISTS { ?place wdt:P31 wd:Q17362920 } . # ignore Wikimedia duplicated page FILTER NOT EXISTS { ?place wdt:P31/wdt:P279* wd:Q192611 } . # ignore constituency FILTER NOT EXISTS { ?place wdt:P31 wd:Q811683 } . # ignore proposed building or structure SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } } GROUP BY ?place ?placeLabel ?article ''' # search for items in bounding box that have an English Wikipedia article # look for coordinates in the headquarters location (P159) wikidata_enwiki_hq_query = ''' SELECT ?place ?placeLabel (SAMPLE(?location) AS ?location) ?article WHERE { ?place p:P159 ?statement . SERVICE wikibase:box { ?statement pq:P625 ?location . bd:serviceParam wikibase:cornerWest "Point({{ west }} {{ south }})"^^geo:wktLiteral . bd:serviceParam wikibase:cornerEast "Point({{ east }} {{ north }})"^^geo:wktLiteral . } ?article schema:about ?place . ?article schema:inLanguage "en" . ?article schema:isPartOf <https://en.wikipedia.org/> . SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } } GROUP BY ?place ?placeLabel ?article ''' wikidata_point_query = ''' SELECT ?place (SAMPLE(?location) AS ?location) ?article WHERE { SERVICE wikibase:around { ?place wdt:P625 ?location . bd:serviceParam wikibase:center "Point({{ lon }} {{ lat }})"^^geo:wktLiteral . bd:serviceParam wikibase:radius "{{ '{:.1f}'.format(radius) }}" . } ?article schema:about ?place . ?article schema:inLanguage "en" . ?article schema:isPartOf <https://en.wikipedia.org/> . } GROUP BY ?place ?article ''' wikidata_subclass_osm_tags = ''' SELECT DISTINCT ?item ?itemLabel ?tag WHERE { { wd:{{qid}} wdt:P31/wdt:P279* ?item . ?item ((p:P1282/ps:P1282)|wdt:P641/(p:P1282/ps:P1282)|wdt:P140/(p:P1282/ps:P1282)|wdt:P366/(p:P1282/ps:P1282)) ?tag . } UNION { wd:{{qid}} wdt:P1435 ?item . ?item (p:P1282/ps:P1282) ?tag } SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } }''' # search for items in bounding box that have OSM tags in the subclass tree wikidata_item_tags = ''' SELECT ?place ?placeLabel (SAMPLE(?location) AS ?location) ?address ?street ?item ?itemLabel ?tag WHERE { SERVICE wikibase:box { ?place wdt:P625 ?location . bd:serviceParam wikibase:cornerWest "Point({{ west }} {{ south }})"^^geo:wktLiteral . bd:serviceParam wikibase:cornerEast "Point({{ east }} {{ north }})"^^geo:wktLiteral . } ?place wdt:P31/wdt:P279* ?item . ?item ((p:P1282/ps:P1282)|wdt:P641/(p:P1282/ps:P1282)|wdt:P140/(p:P1282/ps:P1282)|wdt:P366/(p:P1282/ps:P1282)) ?tag . OPTIONAL { ?place wdt:P969 ?address } . OPTIONAL { ?place wdt:P669 ?street } . FILTER NOT EXISTS { ?item wdt:P31 wd:Q18340550 } . 
# ignore timeline article FILTER NOT EXISTS { ?item wdt:P31 wd:Q13406463 } . # ignore list article FILTER NOT EXISTS { ?place wdt:P31 wd:Q17362920 } . # ignore Wikimedia duplicated page FILTER NOT EXISTS { ?place wdt:P31/wdt:P279* wd:Q192611 } . # ignore constituency FILTER NOT EXISTS { ?place wdt:P31 wd:Q811683 } . # ignore proposed building or structure SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } } GROUP BY ?place ?placeLabel ?address ?street ?item ?itemLabel ?tag ''' # search for items in bounding box that have OSM tags in the subclass tree # look for coordinates in the headquarters location (P159) wikidata_hq_item_tags = ''' SELECT ?place ?placeLabel (SAMPLE(?location) AS ?location) ?address ?street ?item ?itemLabel ?tag WHERE { ?place p:P159 ?statement . SERVICE wikibase:box { ?statement pq:P625 ?location . bd:serviceParam wikibase:cornerWest "Point({{ west }} {{ south }})"^^geo:wktLiteral . bd:serviceParam wikibase:cornerEast "Point({{ east }} {{ north }})"^^geo:wktLiteral . } ?place wdt:P31/wdt:P279* ?item . ?item ((p:P1282/ps:P1282)|wdt:P641/(p:P1282/ps:P1282)|wdt:P140/(p:P1282/ps:P1282)|wdt:P366/(p:P1282/ps:P1282)) ?tag . OPTIONAL { ?place wdt:P969 ?address } . OPTIONAL { ?place wdt:P669 ?street } . FILTER NOT EXISTS { ?place wdt:P31/wdt:P279* wd:Q192611 } . # ignore constituencies SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } } GROUP BY ?place ?placeLabel ?address ?street ?item ?itemLabel ?tag ''' # Q15893266 == former entity # Q56061 == administrative territorial entity next_level_query = ''' SELECT DISTINCT ?item ?itemLabel ?itemDescription ?startLabel (SAMPLE(?pop) AS ?pop) (SAMPLE(?area) AS ?area) (GROUP_CONCAT(DISTINCT ?isa) as ?isa_list) WHERE { VALUES ?start { wd:QID } . ?start wdt:P31/wdt:P279* ?subclass . ?subclass wdt:P150 ?nextlevel . ?item wdt:P131 ?start . ?item wdt:P31/wdt:P279* ?nextlevel . ?item wdt:P31/wdt:P279* wd:Q56061 . FILTER NOT EXISTS { ?item wdt:P31/wdt:P279* wd:Q15893266 } . FILTER NOT EXISTS { ?item wdt:P576 ?end } . OPTIONAL { ?item wdt:P1082 ?pop } . OPTIONAL { ?item p:P2046/psn:P2046/wikibase:quantityAmount ?area } . OPTIONAL { ?item wdt:P31 ?isa } . SERVICE wikibase:label { bd:serviceParam wikibase:language "LANGUAGE" } } GROUP BY ?item ?itemLabel ?itemDescription ?startLabel ORDER BY ?itemLabel ''' next_level_query3 = ''' SELECT DISTINCT ?item ?itemLabel ?itemDescription ?startLabel (SAMPLE(?pop) AS ?pop) (SAMPLE(?area) AS ?area) (GROUP_CONCAT(?isa) as ?isa_list) WHERE { VALUES ?start { wd:QID } . VALUES (?item) { PLACES } OPTIONAL { ?item wdt:P1082 ?pop } . OPTIONAL { ?item p:P2046/psn:P2046/wikibase:quantityAmount ?area } . OPTIONAL { ?item wdt:P31 ?isa } . SERVICE wikibase:label { bd:serviceParam wikibase:language "LANGUAGE" } } GROUP BY ?item ?itemLabel ?itemDescription ?startLabel ORDER BY ?itemLabel ''' next_level_has_part_query = ''' SELECT DISTINCT ?item ?itemLabel ?itemDescription ?startLabel (SAMPLE(?pop) AS ?pop) (SAMPLE(?area) AS ?area) (GROUP_CONCAT(DISTINCT ?isa) as ?isa_list) WHERE { VALUES ?start { wd:QID } . ?start wdt:P527 ?item . ?item wdt:P31/wdt:P279* wd:Q56061 . FILTER NOT EXISTS { ?item wdt:P31/wdt:P279* wd:Q15893266 } . FILTER NOT EXISTS { ?item wdt:P576 ?end } . OPTIONAL { ?item wdt:P1082 ?pop } . OPTIONAL { ?item p:P2046/psn:P2046/wikibase:quantityAmount ?area } . OPTIONAL { ?item wdt:P31 ?isa } . 
SERVICE wikibase:label { bd:serviceParam wikibase:language "LANGUAGE" } } GROUP BY ?item ?itemLabel ?itemDescription ?startLabel ORDER BY ?itemLabel ''' item_labels_query = ''' SELECT ?item ?itemLabel WHERE { VALUES ?item { ITEMS } SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } }''' item_types = ''' SELECT DISTINCT ?item ?type WHERE { VALUES ?item { ITEMS } { ?item wdt:P31/wdt:P279* ?type . ?type ((p:P1282/ps:P1282)|wdt:P641/(p:P1282/ps:P1282)|wdt:P140/(p:P1282/ps:P1282)|wdt:P366/(p:P1282/ps:P1282)) ?tag . FILTER(?tag != 'Key:amenity' && ?tag != 'Key:room' && ?tag != 'Key:man_made' && ?tag != 'Key:location') } UNION { ?item wdt:P31 ?type . VALUES (?type) { TYPES } } SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } } ''' item_types_tree = ''' SELECT DISTINCT ?item ?itemLabel ?country ?countryLabel ?type ?typeLabel WHERE { { VALUES ?top { ITEMS } ?top wdt:P31/wdt:P279* ?item . ?item wdt:P279 ?type . ?type wdt:P279* ?subtype . ?subtype ((p:P1282/ps:P1282)|wdt:P641/(p:P1282/ps:P1282)|wdt:P140/(p:P1282/ps:P1282)|wdt:P366/(p:P1282/ps:P1282)) ?tag . } UNION { VALUES ?item { ITEMS } ?item wdt:P31 ?type . } OPTIONAL { ?item wdt:P17 ?country } SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } } ''' subclasses = ''' SELECT DISTINCT ?item ?itemLabel ?type ?typeLabel WHERE { VALUES ?item { ITEMS } VALUES ?type { ITEMS } ?item wdt:P279* ?type . FILTER (?item != ?type) SERVICE wikibase:label { bd:serviceParam wikibase:language "en" } } ''' # administrative territorial entity of a single country (Q15916867) # 'Q349084'], # England -> district of England admin_area_map = { 'Q21': ['Q1136601', # England -> unitary authority of England 'Q211690', # | London borough 'Q1002812', # | metropolitan borough 'Q643815'], # | (non-)metropolitan county of England 'Q22': ['Q15060255'], # Scotland -> council area 'Q25': ['Q15979307'], # Wales -> principal area of Wales 'Q26': ['Q17364572'], # Northern Ireland -> district of Northern Ireland } next_level_query2 = ''' SELECT DISTINCT ?item ?itemLabel ?startLabel (SAMPLE(?pop) AS ?pop) (SAMPLE(?area) AS ?area) (GROUP_CONCAT(?isa) as ?isa_list) WHERE { VALUES ?start { wd:QID } . TYPES # metropolitan borough of the County of London (old) FILTER NOT EXISTS { ?item wdt:P31 wd:Q9046617 } . FILTER NOT EXISTS { ?item wdt:P31/wdt:P279* wd:Q19953632 } . FILTER NOT EXISTS { ?item wdt:P31/wdt:P279* wd:Q15893266 } . FILTER NOT EXISTS { ?item wdt:P576 ?end } . OPTIONAL { ?item wdt:P1082 ?pop } . OPTIONAL { ?item p:P2046/psn:P2046/wikibase:quantityAmount ?area } . OPTIONAL { ?item wdt:P31 ?isa } . SERVICE wikibase:label { bd:serviceParam wikibase:language "LANGUAGE" } } GROUP BY ?item ?itemLabel ?startLabel ORDER BY ?itemLabel ''' small_island_nations = { 'Q672', # Tuvalu } small_island_nations_query = ''' SELECT DISTINCT ?item ?itemLabel ?startLabel (SAMPLE(?pop) AS ?pop) (SAMPLE(?area) AS ?area) (GROUP_CONCAT(?isa) as ?isa_list) WHERE { VALUES ?start { wd:QID } . ?item wdt:P17 ?start . ?item wdt:P31/wdt:P279* wd:Q205895 . # landform FILTER NOT EXISTS { ?item wdt:P576 ?end } . OPTIONAL { ?item wdt:P1082 ?pop } . OPTIONAL { ?item p:P2046/psn:P2046/wikibase:quantityAmount ?area } . OPTIONAL { ?item wdt:P31 ?isa } . 
SERVICE wikibase:label { bd:serviceParam wikibase:language "LANGUAGE" } } GROUP BY ?item ?itemLabel ?startLabel ORDER BY ?itemLabel ''' countries_in_continent_query = ''' SELECT DISTINCT ?item ?itemLabel ?startLabel (SAMPLE(?pop) AS ?pop) (SAMPLE(?area) AS ?area) (GROUP_CONCAT(?isa) as ?isa_list) WHERE { VALUES ?start { wd:QID } . VALUES (?region) { (wd:Q3624078) # sovereign state (wd:Q161243) # dependent territory (wd:Q179164) # unitary state (wd:Q1763527) # constituent country (wd:Q734818) # condominium (wd:Q82794) # geographic region } ?item wdt:P30 ?start . ?item p:P31 ?statement . ?statement ps:P31 ?region . FILTER NOT EXISTS { ?item wdt:P31/wdt:P279* wd:Q15893266 } . FILTER NOT EXISTS { ?item wdt:P576 ?end } . OPTIONAL { ?item wdt:P1082 ?pop } . OPTIONAL { ?item p:P2046/psn:P2046/wikibase:quantityAmount ?area } . OPTIONAL { ?item wdt:P31 ?isa } . SERVICE wikibase:label { bd:serviceParam wikibase:language "LANGUAGE" } } GROUP BY ?item ?itemLabel ?startLabel ORDER BY ?itemLabel ''' # walk place hierarchy grabbing labels and country names located_in_query = ''' SELECT ?item ?itemLabel ?country ?countryLabel WHERE { SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } VALUES ?start { wd:QID } . ?start wdt:P131* ?item . OPTIONAL { ?item wdt:P17 ?country.} } ''' up_one_level_query = ''' SELECT ?startLabel ?itemLabel ?country1 ?country1Label ?country2 ?country2Label ?isa WHERE { SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } VALUES ?start { wd:QID } . OPTIONAL { ?start wdt:P17 ?country1 } OPTIONAL { ?start wdt:P131 ?item . ?item wdt:P31 ?isa . } OPTIONAL { ?item wdt:P17 ?country2 } } ''' next_level_type_map = { 'Q48091': ['Q1136601', # unitary authority of England 'Q211690', # London borough 'Q1002812', # metropolitan borough 'Q643815', # (non-)metropolitan county of England 'Q180673'], # ceremonial county of England 'Q1136601': ['Q1115575'], # civil parish 'Q1187580': ['Q1115575'], # civil parish } next_level_by_type = ''' SELECT DISTINCT ?item ?itemLabel ?startLabel (SAMPLE(?pop) AS ?pop) (SAMPLE(?area) AS ?area) (GROUP_CONCAT(?isa) as ?isa_list) WHERE { VALUES ?start { wd:QID } . TYPES ?item wdt:P131 ?start . FILTER NOT EXISTS { ?item wdt:P31/wdt:P279* wd:Q19953632 } . FILTER NOT EXISTS { ?item wdt:P31/wdt:P279* wd:Q15893266 } . FILTER NOT EXISTS { ?item wdt:P576 ?end } . OPTIONAL { ?item wdt:P1082 ?pop } . OPTIONAL { ?item p:P2046/psn:P2046/wikibase:quantityAmount ?area } . OPTIONAL { ?item wdt:P31 ?isa } . SERVICE wikibase:label { bd:serviceParam wikibase:language "LANGUAGE" } } GROUP BY ?item ?itemLabel ?startLabel ORDER BY ?itemLabel ''' instance_of_query = ''' SELECT DISTINCT ?item ?itemLabel ?countryLabel (SAMPLE(?location) AS ?location) WHERE { SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en" } ?item wdt:P31/wdt:P279* wd:QID . OPTIONAL { ?item wdt:P17 ?country } OPTIONAL { ?item wdt:P625 ?location } } GROUP BY ?item ?itemLabel ?countryLabel ''' continents_with_country_count_query = ''' SELECT ?continent ?continentLabel ?continentDescription ?banner (COUNT(?country) AS ?count) WHERE { SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } ?country wdt:P30 ?continent . ?country wdt:P31 wd:Q6256 . 
?continent wdt:P948 ?banner } GROUP BY ?continent ?continentLabel ?continentDescription ?banner ORDER BY ?continentLabel ''' wikidata_query_api_url = 'https://query.wikidata.org/bigdata/namespace/wdq/sparql' def get_query(q, south, north, west, east): return render_template_string(q, south=south, north=north, west=west, east=east) def query_map(prefix, **kwargs): if kwargs.get('want_isa'): queries = ('item_tag', 'hq_item_tag') else: queries = ('enwiki', 'hq_enwiki', 'item_tag', 'hq_item_tag') return { name: render_template(f'wikidata_query/{prefix}_{name}.sparql', **kwargs) for name in queries } def bbox_query_map(south, north, west, east, **kwargs): return query_map('bbox', south=south, north=north, west=west, east=east, **kwargs) def point_query_map(lat, lon, radius_m): return query_map('point', lat=lat, lon=lon, radius=radius_m / 1_000) def get_enwiki_query(*args): return get_query(wikidata_enwiki_query, *args) def get_enwiki_hq_query(*args): return get_query(wikidata_enwiki_hq_query, *args) def get_item_tag_query(*args): return get_query(wikidata_item_tags, *args) def get_hq_item_tag_query(*args): return get_query(wikidata_hq_item_tags, *args) def get_point_query(lat, lon, radius): return render_template_string(wikidata_point_query, lat=lat, lon=lon, radius=float(radius) / 1000.0) def run_query(query, name=None, return_json=True, timeout=None, send_error_mail=False): attempts = 5 def error_mail(subject, r): if send_error_mail: mail.error_mail('wikidata query error', query, r) if name: filename = cache_filename(name + '.json') if os.path.exists(filename): return json.load(open(filename))['results']['bindings'] for attempt in range(attempts): try: # retry if we get a ChunkedEncodingError r = requests.post(wikidata_query_api_url, data={'query': query, 'format': 'json'}, timeout=timeout, headers=user_agent_headers()) if r.status_code != 200: break if name: open(filename, 'wb').write(r.content) if return_json: return r.json()['results']['bindings'] else: return r except requests.exceptions.ChunkedEncodingError: if attempt == attempts - 1: error_mail('wikidata query error', r) raise QueryError(query, r) # query timeout generates two different exceptions # java.lang.RuntimeException: java.util.concurrent.ExecutionException: com.bigdata.bop.engine.QueryTimeoutException: Query deadline is expired. # java.util.concurrent.TimeoutException if ('Query deadline is expired.' 
in r.text or
                'java.util.concurrent.TimeoutException' in r.text):
            error_mail('wikidata query timeout', r)
            raise QueryTimeout(query, r)

        error_mail('wikidata query error', r)
        raise QueryError(query, r)


def flatten_criteria(items):
    start = {'Tag:' + i[4:] + '=' for i in items if i.startswith('Key:')}
    return {i for i in items if not any(i.startswith(s) for s in start)}


def wd_uri_to_id(value):
    return int(drop_start(value, wd_entity))


def wd_to_qid(wd):
    # expecting {'type': 'uri', 'value': 'http://www.wikidata.org/entity/Q30'}
    if wd['type'] == 'uri':
        return wd_uri_to_qid(wd['value'])


def wd_uri_to_qid(value):
    if not value.startswith(wd_entity):
        print(repr(value))
    assert value.startswith(wd_entity)
    return value[len(wd_entity) - 1:]


def enwiki_url_to_title(url):
    return unquote(drop_start(url, enwiki)).replace('_', ' ')


def parse_enwiki_query(rows):
    return {wd_to_qid(row['place']): {
                'query_label': row['placeLabel']['value'],
                'enwiki': enwiki_url_to_title(row['article']['value']),
                'location': row['location']['value'],
                'tags': set(),
            } for row in rows}


def drop_tag_prefix(v):
    if v.startswith('Key:') and '=' not in v:
        return v[4:]
    if v.startswith('Tag:') and '=' in v:
        return v[4:]


def parse_item_tag_query(rows, items):
    for row in rows:
        tag_or_key = drop_tag_prefix(row['tag']['value'])
        if not tag_or_key or tag_or_key in skip_tags:
            continue
        qid = wd_to_qid(row['place'])
        if not qid:
            continue

        if qid not in items:
            items[qid] = {
                'query_label': row['placeLabel']['value'],
                'location': row['location']['value'],
                'tags': set(),
            }
            for k in 'address', 'street':
                if k in row:
                    items[qid][k] = row[k]['value']
        items[qid]['tags'].add(tag_or_key)


def page_banner_from_entity(entity, **kwargs):
    property_key = 'P948'
    if property_key not in entity['claims']:
        return
    filename = entity['claims'][property_key][0]['mainsnak']['datavalue']['value']
    try:
        images = commons.image_detail([filename], **kwargs)
        return images[filename]
    except Exception:
        return


def entity_label(entity, language=None):
    if language and language in entity['labels']:
        return entity['labels'][language]['value']

    if 'en' in entity['labels']:
        return entity['labels']['en']['value']

    # fall back to an arbitrary label
    return list(entity['labels'].values())[0]['value']


def entity_description(entity, language=None):
    if language and language in entity['descriptions']:
        return entity['descriptions'][language]['value']

    if 'en' in entity['descriptions']:
        return entity['descriptions']['en']['value']


def names_from_entity(entity, skip_lang=None):
    if not entity or 'labels' not in entity:
        return
    if skip_lang is None:
        skip_lang = set()
    if not entity:
        return

    ret = defaultdict(list)
    cat_start = 'Category:'

    for k, v in entity['labels'].items():
        if k in skip_lang:
            continue
        ret[v['value']].append(('label', k))

    for k, v in entity['sitelinks'].items():
        if k + 'wiki' in skip_lang:
            continue
        title = v['title']
        if title.startswith(cat_start):
            title = title[len(cat_start):]

        first_letter = title[0]
        if first_letter.isupper():
            lc_first_title = first_letter.lower() + title[1:]
            if lc_first_title in ret:
                title = lc_first_title

        ret[title].append(('sitelink', k))

    for lang, value_list in entity.get('aliases', {}).items():
        if lang in skip_lang or len(value_list) > 3:
            continue
        for name in value_list:
            ret[name['value']].append(('alias', lang))

    commonscats = entity.get('claims', {}).get('P373', [])
    for i in commonscats:
        if 'datavalue' not in i['mainsnak']:
            if report_missing_values:
                mail.datavalue_missing('commons category', entity)
            continue
        value = i['mainsnak']['datavalue']['value']
ret[value].append(('commonscat', None)) officialname = entity.get('claims', {}).get('P1448', []) for i in officialname: if 'datavalue' not in i['mainsnak']: if report_missing_values: mail.datavalue_missing('official name', entity) continue value = i['mainsnak']['datavalue']['value'] ret[value['text']].append(('officialname', value['language'])) nativelabel = entity.get('claims', {}).get('P1705', []) for i in nativelabel: if 'datavalue' not in i['mainsnak']: if report_missing_values: mail.datavalue_missing('native label', entity) continue value = i['mainsnak']['datavalue']['value'] ret[value['text']].append(('nativelabel', value['language'])) image = entity.get('claims', {}).get('P18', []) for i in image: if 'datavalue' not in i['mainsnak']: if report_missing_values: mail.datavalue_missing('image', entity) continue value = i['mainsnak']['datavalue']['value'] m = re.search(r'\.[a-z]{3,4}$', value) if m: value = value[:m.start()] for pattern in r' - geograph\.org\.uk - \d+$', r'[, -]*0\d{2,}$': m = re.search(pattern, value) if m: value = value[:m.start()] break ret[value].append(('image', None)) return ret def parse_osm_keys(rows): start = 'http://www.wikidata.org/entity/' items = {} for row in rows: uri = row['item']['value'] qid = drop_start(uri, start) tag = row['tag']['value'] for i in 'Key:', 'Tag:': if tag.startswith(i): tag = tag[4:] # Ignore some overly generic tags from Wikidata objects: # facility (Q13226383) - osm tag: amenity # geographic location (Q2221906) - osm tag: location # artificial entity (Q16686448) - osm tag: man_made if tag in {'amenity', 'location', 'man_made'}: continue if qid not in items: items[qid] = { 'uri': uri, 'label': row['itemLabel']['value'], 'tags': set(), } items[qid]['tags'].add(tag) return items def get_location_hierarchy(qid, name=None): # not currently in use query = located_in_query.replace('QID', qid) return [{ 'qid': wd_to_qid(row['item']), 'label': row['itemLabel']['value'], 'country': row['countryLabel']['value'], } for row in run_query(query, name=name)] def up_one_level(qid, name=None): query = up_one_level_query.replace('QID', qid) try: rows = run_query(query, name=name, timeout=2) except requests.Timeout: return if not rows: return skip = { 'Q180673', # ceremonial county of England 'Q1138494', # historic county of England } ignore_up = any(wd_to_qid(row['isa']) in skip for row in rows) row = rows[0] c1 = 'country1' in row c2 = 'country2' in row return { 'name': row['startLabel']['value'], 'up': row['itemLabel']['value'] if not ignore_up else None, 'country_qid': wd_to_qid(row['country1']) if c1 else None, 'country_name': row['country1Label']['value'] if c1 else None, 'up_country_qid': wd_to_qid(row['country2']) if c2 else None, 'up_country_name': row['country2Label']['value'] if c2 else None, } def next_level_types(types): types = list(types) if len(types) == 1: return '?item wdt:P31/wdt:P279* wd:{} .'.format(types[0]) return ' union '.join('{ ?item wdt:P31/wdt:P279* wd:' + t + ' }' for t in types) def isa_list(types): types = list(types) if len(types) == 1: return '?item wdt:P31 wd:{} .'.format(types[0]) return ' union '.join('{ ?item wdt:P31 wd:' + t + ' }' for t in types) def get_next_level_query(qid, entity, language='en', name=None): claims = entity.get('claims', {}) isa = {i['mainsnak']['datavalue']['value']['id'] for i in claims.get('P31', [])} isa_continent = { 'Q5107', # continent 'Q855697', # subcontinent } types_from_isa = isa & next_level_type_map.keys() if types_from_isa: # use first match in type map type_list = 
next_level_type_map[list(types_from_isa)[0]] type_values = ' '.join(f'wd:{type_qid}' for type_qid in type_list) types = 'VALUES ?type {' + type_values + '} .\n?item wdt:P31 ?type .\n' query = next_level_by_type.replace('TYPES', types) elif isa & isa_continent: query = countries_in_continent_query elif qid in small_island_nations: query = small_island_nations_query elif qid in admin_area_map: types = next_level_types(admin_area_map[qid]) query = next_level_query2.replace('TYPES', types) elif 'P150' in claims: # P150 = contains administrative territorial entity places = [i['mainsnak']['datavalue']['value']['id'] for i in claims['P150']] query_places = ' '.join(f'(wd:{qid})' for qid in places) query = next_level_query3.replace('PLACES', query_places) elif 'Q82794' in isa and 'P527' in claims: places = [i['mainsnak']['datavalue']['value']['id'] for i in claims['P527']] query_places = ' '.join(f'(wd:{qid})' for qid in places) query = next_level_query3.replace('PLACES', query_places) else: query = next_level_query return query.replace('QID', qid).replace('LANGUAGE', language) def next_level_places(qid, entity, language=None, query=None, name=None): if not query: query = get_next_level_query(qid, entity, language=language) rows = [] t0 = time() r = run_query(query, name=name, return_json=False, send_error_mail=True) query_time = time() - t0 if query_time > 2: subject = f'next level places query took {query_time:.1f}' body = f'{request.url}\n\n{query}' mail.send_mail(subject, body) query_rows = r.json()['results']['bindings'] if any('isa_list' not in row for row in query_rows): mail.error_mail('wikidata browse query error', query, r) raise QueryError(query, r) if not query_rows and 'P527' in entity['claims']: query = (next_level_has_part_query.replace('QID', qid) .replace('LANGUAGE', language)) r = run_query(query, name=name, return_json=False, send_error_mail=True) query_rows = r.json()['results']['bindings'] if not query_rows: claims = entity.get('claims', {}) located_in = {i['mainsnak']['datavalue']['value']['id'] for i in claims.get('P131', [])} for located_in_qid in located_in: located_in_entity = get_entity(located_in_qid) query = get_next_level_query(located_in_qid, located_in_entity, language=language) r = run_query(query, return_json=False, send_error_mail=True) located_in_rows = r.json()['results']['bindings'] query_rows += located_in_rows for row in query_rows: item_id = wd_uri_to_id(row['item']['value']) qid = 'Q{:d}'.format(item_id) isa_list = [] for url in row['isa_list']['value'].split(' '): if not url: continue isa_qid = wd_uri_to_qid(url) if isa_qid not in isa_list: isa_list.append(isa_qid) pop = row.get('pop') # https://www.wikidata.org/wiki/Q896427 has 'unknown value' for population if pop: try: pop_value = int(pop['value']) except ValueError: pop_value = None else: pop_value = None i = { 'population': pop_value, 'area': (int(float(row['area']['value']) / 1e6) if row.get('area') else None), 'label': row['itemLabel']['value'], 'description': (row['itemDescription']['value'] if 'itemDescription' in row else None), 'start': row['startLabel']['value'], 'item_id': item_id, 'qid': qid, 'isa': isa_list, } rows.append(i) return rows def query_for_items(query, items): assert items query_items = ' '.join(f'wd:{qid}' for qid in items) return query.replace('ITEMS', query_items) def get_item_labels(items): query = query_for_items(item_labels_query, items) rows = [] for row in run_query(query): item_id = wd_uri_to_id(row['item']['value']) qid = 'Q{:d}'.format(item_id) i = { 'label': 
row['itemLabel']['value'], 'item_id': item_id, 'qid': qid, } rows.append(i) return rows def row_qid_and_label(row, name): qid = wd_to_qid(row[name]) if not qid: return return {'qid': qid, 'label': row[name + 'Label']['value']} def get_isa(items, name=None): graph = item_types_graph(items, name=name) ret = {} for qid in items: if qid not in graph: continue visited, queue = set(), [qid] result = [] while queue: vertex = queue.pop(0) if vertex in visited: continue if vertex != qid: result.append(graph[vertex]) visited.add(vertex) if ((vertex == qid or 'country' in graph[vertex]) and 'children' in graph[vertex]): queue.extend(graph[vertex]['children'] - visited) drop = set() for i in result[:]: if not (len(i.get('children', [])) == 1 and 'country' in i and any(c.isupper() for c in i['label'])): continue child = graph[list(i['children'])[0]]['label'] if i['label'].startswith(child): drop.add(i['qid']) else: i['label'] += f' ({child})' result = [i for i in result if i['qid'] not in drop] all_children = set() for i in result: if 'children' in i: all_children.update(i.pop('children')) if 'country' in i: del i['country'] ret[qid] = [i for i in result if i['qid'] not in all_children] return ret def item_types_graph(items, name=None, rows=None): if rows is None: query = query_for_items(item_types_tree, items) rows = run_query(query, name=name, send_error_mail=False) graph = {} for row in rows: item_qid = wd_to_qid(row['item']) type_qid = wd_to_qid(row['type']) if not item_qid or not type_qid: continue if type_qid not in graph: graph[type_qid] = { 'qid': type_qid, 'label': row['typeLabel']['value'], 'children': set(), } if item_qid not in graph: graph[item_qid] = { 'qid': item_qid, 'label': row['itemLabel']['value'], 'children': set(), } if 'country' in row and 'country' not in graph[item_qid]: country = row_qid_and_label(row, 'country') if country: graph[item_qid]['country'] = country graph[item_qid]['children'].add(type_qid) return graph def find_superclasses(items, name=None): query = query_for_items(subclasses, items) return {(wd_to_qid(row['item']), wd_to_qid(row['type'])) for row in run_query(query, name=name)} def claim_value(claim): try: return claim['mainsnak']['datavalue']['value'] except KeyError: pass def country_iso_codes_from_qid(qid): item = WikidataItem.retrieve_item(qid) extra = {'Q159583': 'VA'} # Holy See no_iso_3166_code = { 'Q23427', # South Ossetia 'Q3315371', # Global Affairs Canada 'Q170355', # Indigenous Australians 'Q6605', # Sakha Republic 'Q53492009', # Embassy of the United States, Jerusalem } # Embassy of Canada, Washington, D.C. 
(Q137245) has two values in the # operator (P137) property: Canada (Q16) and Global Affairs Canada (Q3315371) # We ignore the second one # Aboriginal Tent Embassy (Q189212) has the operator (P137) property as # Indigenous Australians (Q170355) # Tel Aviv Branch Office of the Embassy of the United States (Q53444085) # operator (P137): Embassy of the United States, Jerusalem (Q53492009) if qid in no_iso_3166_code: return for wikidata_property in ('P297', 'P298'): if qid in extra or item.claims.get(wikidata_property): continue body = 'https://www.wikidata.org/wiki/' + qid mail.send_mail(f'{qid}: {wikidata_property} is missing', body) codes = [claim_value(c) for c in item.claims.get('P297') or []] codes += [claim_value(c) for c in item.claims.get('P298') or []] if qid in extra: codes.append(extra[qid]) return [i for i in codes if i is not None] class WikidataItem: def __init__(self, qid, entity): assert entity self.qid = qid self.entity = entity @classmethod def retrieve_item(cls, qid): entity = get_entity(qid) if not entity: return item = cls(qid, entity) return item @property def claims(self): return self.entity['claims'] @property def labels(self): return self.entity.get('labels', {}) @property def aliases(self): return self.entity.get('aliases', {}) @property def sitelinks(self): return self.entity.get('sitelinks', {}) def get_sitelinks(self): '''List of sitelinks with language names in English.''' sitelinks = [] for key, value in self.sitelinks.items(): if len(key) != 6 or not key.endswith('wiki'): continue lang = key[:2] url = 'https://{}.wikipedia.org/wiki/{}'.format(lang, value['title'].replace(' ', '_')) sitelinks.append({ 'code': lang, 'lang': get_language_label(lang), 'url': url, 'title': value['title'], }) sitelinks.sort(key=lambda i: i['lang']) return sitelinks def remove_badges(self): if 'sitelinks' not in self.entity: return for v in self.entity['sitelinks'].values(): if 'badges' in v: del v['badges'] def first_claim_value(self, key): return claim_value(self.claims[key][0]) @property def has_coords(self): try: self.first_claim_value('P625') except (IndexError, KeyError): return False return True @property def has_earth_coords(self): earth = 'http://www.wikidata.org/entity/Q2' return self.has_coords and self.first_claim_value('P625')['globe'] == earth @property def coords(self): if not self.has_coords: return None, None c = self.first_claim_value('P625') return c['latitude'], c['longitude'] @property def nrhp(self): try: nrhp = self.first_claim_value('P649') except (IndexError, KeyError): return if nrhp.isdigit(): return nrhp def get_oql(self, criteria, radius): nrhp = self.nrhp if not criteria: return lat, lon = self.coords if lat is None or lon is None: return osm_filter = 'around:{},{:.5f},{:.5f}'.format(radius, lat, lon) union = [] for tag_or_key in sorted(criteria): union += overpass.oql_from_wikidata_tag_or_key(tag_or_key, osm_filter) if nrhp: union += ['\n {}({})["ref:nrhp"={}];'.format(t, osm_filter, nrhp) for t in ('node', 'way', 'rel')] # FIXME extend oql to also check is_in # like this: # # is_in(48.856089,2.29789); # area._[admin_level]; # out tags; oql = ('[timeout:300][out:json];\n' + '({}\n);\n' + 'out center tags;').format(''.join(union)) return oql def trim_location_from_names(self, wikidata_names): if 'P131' not in self.entity['claims']: return location_names = set() located_in = [i['mainsnak']['datavalue']['value']['id'] for i in self.entity['claims']['P131'] if 'datavalue' in i['mainsnak']] # Parc naturel régional des marais du Cotentin et du Bessin (Q2138341) 
# is in more than 50 locations. The maximum entities in one request is 50. if len(located_in) > 50: return for location in get_entities(located_in): if 'labels' not in location: continue location_names |= {v['value'] for v in location['labels'].values() if v['value'] not in wikidata_names} for name_key, name_values in list(wikidata_names.items()): for n in location_names: new = None if name_key.startswith(n + ' '): new = name_key[len(n) + 1:] elif name_key.endswith(', ' + n): new = name_key[:-(len(n) + 2)] if new and new not in wikidata_names: wikidata_names[new] = name_values def osm_key_query(self): return render_template_string(wikidata_subclass_osm_tags, qid=self.qid) @property def osm_keys(self): if hasattr(self, '_osm_keys'): return self._osm_keys self._osm_keys = run_query(self.osm_key_query()) return self._osm_keys def languages_from_country(self): langs = [] for country in self.claims.get('P17', []): c = claim_value(country) if not c: continue for l in language.get_country_lanaguage(c['numeric-id']): if l not in langs: langs.append(l) return langs def query_language_from_country(self): if hasattr(self, '_language_codes'): return self._language_codes query = ''' SELECT DISTINCT ?code WHERE { wd:QID wdt:P17 ?country . ?country wdt:P37 ?lang . ?lang wdt:P424 ?code . }'''.replace('QID', self.qid) rows = run_query(query) self._language_codes = [row['code']['value'] for row in rows] return self._language_codes def label(self, lang=None): labels = self.labels sitelinks = [i[:-4] for i in self.sitelinks.keys() if i.endswith('wiki')] if not labels: return if lang and lang in labels: # requested language return labels[lang]['value'] language_codes = self.languages_from_country() for code in language_codes: if code in labels and code in sitelinks: return labels[code]['value'] for code in language_codes: if code in labels: return labels[code]['value'] if 'en' in labels: return labels['en']['value'] for code in sitelinks: if code in labels: return labels[code]['value'] return list(labels.values())[0]['value'] @property def names(self): return dict(names_from_entity(self.entity)) @property def is_a(self): return [isa['mainsnak']['datavalue']['value']['id'] for isa in self.entity.get('claims', {}).get('P31', [])] @property def is_a_detail(self): return [WikidataItem.retrieve_item(qid) for qid in self.is_a] def is_proposed(self): '''is this a proposed building or structure (Q811683)?''' return 'Q811683' in self.is_a def criteria(self): items = {row['tag']['value'] for row in self.osm_keys} for is_a in self.is_a: items |= set(extra_keys.get(is_a, [])) # Ignore some overly generic tags from Wikidata objects: # facility (Q13226383) - osm key: amenity # geographic location (Q2221906) - osm key: location # artificial entity (Q16686448) - osm key: man_made # room (Q180516) - osm key: room items.discard('Key:amenity') items.discard('Key:location') items.discard('Key:man_made') items.discard('Key:room') return items def report_broken_wikidata_osm_tags(self): start_allowed = ('Key', 'Tag', 'Role', 'Relation') for row in self.osm_keys: value = row['tag']['value'] if any(value.startswith(f'{start}:') for start in start_allowed): continue isa_item_id = wd_uri_to_id(row['item']['value']) isa_qid = 'Q{:d}'.format(isa_item_id) body = f''' qid: {self.qid}\n IsA: https://osm.wikidata.link/reports/isa/{isa_qid} row: {repr(row)}\n''' mail.send_mail('broken OSM tag in Wikidata', body) def find_nrhp_match(self, overpass_reply): nrhp = self.nrhp if not nrhp: return osm = [e for e in overpass_reply if 
e['tags'].get('ref:nrhp') == nrhp] if len(osm) == 1: return osm[0] def parse_item_query(self, criteria, overpass_reply): nrhp_match = self.find_nrhp_match(overpass_reply) if nrhp_match: return [(nrhp_match, None)] wikidata_names = self.names self.trim_location_from_names(wikidata_names) endings = matcher.get_ending_from_criteria({i.partition(':')[2] for i in criteria}) found = [] for element in overpass_reply: m = match.check_for_match(element['tags'], wikidata_names, endings=endings) if m: element['key'] = '{0[type]:s}_{0[id]:d}'.format(element) found.append((element, m)) return found
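
# Usage sketch (illustrative, not part of the original module): how the
# WikidataItem class above is typically driven. Assumes get_entity() can
# reach the Wikidata API and that the module-level helpers (run_query,
# overpass, mail) are configured; 'Q42' and the 1000m radius are arbitrary.
#
#   item = WikidataItem.retrieve_item('Q42')
#   if item is not None and item.has_earth_coords:
#       lat, lon = item.coords
#       criteria = item.criteria()          # OSM keys/tags worth matching on
#       oql = item.get_oql(criteria, 1000)  # Overpass QL within a 1km radius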
EdwardBetts/osm-wikidata
matcher/wikidata.py
Python
gpl-3.0
54,264
# -*- Mode: Python; test-case-name: flumotion.test.test_feedcomponent010 -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.

# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.

# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.

# Headers in this file shall remain intact.

import gettext
import os

import gtk
import gtk.glade

from twisted.python import util
from twisted.internet import defer
from zope.interface import implements

from flumotion.common import errors, log, messages
from flumotion.common.i18n import N_, gettexter
from flumotion.configure import configure
from flumotion.twisted import flavors
from flumotion.ui.fgtk import ProxyWidgetMapping

_ = gettext.gettext
__version__ = "$Rev$"
T_ = gettexter()


class BaseAdminGtkNode(log.Loggable):
    """
    I am a base class for all GTK+-based Admin UI nodes.
    I am a view on a set of properties for a component.

    @ivar widget: the main widget representing this node
    @type widget: L{gtk.Widget}
    @ivar wtree:  the widget tree representation for this node
    """

    implements(flavors.IStateListener)

    logCategory = "admingtk"
    gladeFile = None  ## Relative path of the glade file.
                      ## e.g. "flumotion/ui.glade"
    gettextDomain = configure.PACKAGE

    def __init__(self, state, admin, title=None):
        """
        @param state: state of component this is a UI node for
        @type  state: L{flumotion.common.planet.AdminComponentState}
        @param admin: the admin model that interfaces with the manager for us
        @type  admin: L{flumotion.admin.admin.AdminModel}
        @param title: the (translated) title to show this node with
        @type  title: str
        """
        self._debugEnabled = False
        self.state = state
        self.admin = admin
        self.statusbar = None
        self.title = title
        self.nodes = util.OrderedDict()
        self.wtree = None  # glade.XML instance (optionally set)
        self.widget = None  # the top level widget that will be visible
        self.uiState = None  # set if we are listening
        self._pendingUIState = None  # set if we are waiting for the ui
                                     # to load
        ## Absolute path to the glade file.
        ## e.g. "/home/flu/.flumotion/cache/test/80...df7/flumotion/ui.glade
        self._gladefilepath = None

    def setDebugEnabled(self, enabled):
        """Set if debug should be enabled.
        Not all pages are visible unless debugging is set to true

        @param enabled: whether debug should be enabled
        @type  enabled: bool
        """
        self._debugEnabled = enabled

    def cleanup(self):
        if self.uiState:
            self.uiState.removeListener(self)

    def status_push(self, str):
        if self.statusbar:
            return self.statusbar.push('notebook', str)

    def status_pop(self, mid):
        if self.statusbar:
            return self.statusbar.remove('notebook', mid)

    def callRemote(self, methodName, *args, **kwargs):
        return self.admin.componentCallRemote(self.state, methodName,
                                              *args, **kwargs)

    # FIXME: do this automatically if there is a gladeFile class attr set

    def loadGladeFile(self, gladeFile, domain=configure.PACKAGE):
        """
        Returns: a deferred returning the widget tree from the glade file.
        """

        def _getBundledFileCallback(result, gladeFile):
            path = result
            if not os.path.exists(path):
                self.warning("Glade file %s not found in path %s" % (
                    gladeFile, path))
            self.debug("loading widget tree from %s" % path)

            old = gtk.glade.textdomain()
            self.debug("Switching glade text domain from %s to %s" % (
                old, domain))
            self._gladefilepath = path
            gtk.glade.textdomain(domain)

            self.wtree = gtk.glade.XML(path, typedict=ProxyWidgetMapping())

            self.debug("Switching glade text domain back from %s to %s" % (
                domain, old))
            gtk.glade.textdomain(old)
            return self.wtree

        # The manager is always using / as a path separator, to avoid
        # confusion, convert os.path.sep -> / here.
        gladeFile = gladeFile.replace(os.path.sep, '/')
        # FIXME: this does needless roundtrips; should instead be
        # loading from the already-downloaded paths
        self.debug("requesting bundle for glade file %s" % gladeFile)
        d = self.admin.bundleLoader.getFile(gladeFile)
        d.addCallback(_getBundledFileCallback, gladeFile)
        return d

    def getWidget(self, name):
        if not self.wtree:
            raise IndexError
        widget = self.wtree.get_widget(name)
        if not widget:
            self.warning('Could not get widget %s' % name)
        return widget

    def createWidget(self, name):
        """
        Create a new widget instance from the glade file.
        Can be used to make multiple instances of the same widget.
        """
        if not self._gladefilepath:
            raise IndexError
        wtree = gtk.glade.XML(self._gladefilepath, name,
                              typedict=ProxyWidgetMapping())
        widget = wtree.get_widget(name)
        if not widget:
            self.warning('Could not create widget %s' % name)
        return widget

    def haveWidgetTree(self):
        """
        I am called when the widget tree has been gotten from the glade
        file. Responsible for setting self.widget.

        Override me to act on it.
        """
        pass

    def gotUIState(self, state):
        if self.widget:
            self.setUIState(state)
        else:
            self._pendingUIState = state

    def setUIState(self, state):
        """
        Called by the BaseAdminGtk when it gets the UI state and the GUI
        is ready. Chain up if you provide your own implementation.
        """
        self.uiState = state
        state.addListener(self, set_=self.stateSet,
                          append=self.stateAppend,
                          remove=self.stateRemove,
                          setitem=self.stateSetitem,
                          delitem=self.stateDelitem)

    def stateSet(self, state, key, value):
        "Override me"
        pass

    def stateAppend(self, state, key, value):
        "Override me"
        pass

    def stateRemove(self, state, key, value):
        "Override me"
        pass

    def stateSetitem(self, state, key, subkey, value):
        "Override me"
        pass

    def stateDelitem(self, state, key, subkey, value):
        "Override me"
        pass

    def render(self):
        """
        Render the GTK+ admin view for this component.

        Returns: a deferred returning the main widget for embedding
        """
        self.debug('BaseAdminGtkNode.render() for %s' % self.title)

        # clear up previous error messages
        allmessages = self.state.get('messages', [])
        for message in allmessages:
            # since we can have multiple nodes, only remove the one from
            # ours; this assumes each node's title is unique for a component
            if message.id == 'render-%s' % self.title:
                self.debug('Removing previous messages %r' % message)
                self.state.observe_remove('messages', message)

        def error(debug):
            # add an error message to the component and return
            # an error label, given a debug string
            self.warning("error rendering component UI; debug %s", debug)
            m = messages.Error(T_(N_(
                "Internal error in component UI's '%s' tab. "
                "Please file a bug against the component."), self.title),
                debug=debug,
                mid="render-%s" % self.title)
            self.addMessage(m)

            label = gtk.Label(_("Internal error.\nSee component error "
                                "message\nfor more details."))
            # if we don't set this error as our label, we will raise
            # a TypeError below and obscure this more meaningful error
            self.widget = label
            return label

        def loadGladeFile():
            # F0.8
            if hasattr(self, 'glade_file'):
                self.gladeFile = self.glade_file
                debug = "class %r should have glade_file " \
                        "changed to gladeFile" % self.__class__
                import warnings
                warnings.warn(debug, DeprecationWarning)
                m = messages.Warning(T_(N_(
                    "Internal error in component UI's '%s' tab. "
                    "Please file a bug against the component."), self.title),
                    debug=debug,
                    mid="render-%s" % self.title)
                self.addMessage(m)

            if not self.gladeFile:
                return defer.succeed(None)

            def haveWtree(wtree):
                self.wtree = wtree
                self.debug('render: calling haveWidgetTree')
                try:
                    self.haveWidgetTree()
                except Exception, e:
                    return error(log.getExceptionMessage(e))

            self.debug('render: loading glade file %s in text domain %s',
                       self.gladeFile, self.gettextDomain)
            d = self.loadGladeFile(self.gladeFile, self.gettextDomain)
            d.addCallback(haveWtree)
            return d

        def loadGladeFileErrback(failure):
            if failure.check(RuntimeError):
                return error(
                    'Could not load glade file %s.' % self.gladeFile)
            if failure.check(errors.NoBundleError):
                return error(
                    'No bundle found containing %s.' % self.gladeFile)
            return failure

        def renderFinishedCallback(_):
            if not self.widget:
                self.debug('render: no self.widget, failing')
                raise TypeError(
                    '%r.haveWidgetTree should have set self.widget' %
                    self.__class__)

            if self._pendingUIState:
                self.debug('render: calling setUIState on the node')
                self.setUIState(self._pendingUIState)

            self.debug('renderFinished: returning widget %s', self.widget)
            return self.widget

        def renderFinishedErrback(failure):
            return error(log.getFailureMessage(failure))

        d = loadGladeFile()
        d.addErrback(loadGladeFileErrback)
        d.addCallback(renderFinishedCallback)
        d.addErrback(renderFinishedErrback)
        return d

    def addMessage(self, message):
        """
        Add a message to the component.
        Since this is called in a component view and only relevant to the
        component view, the message only exists in the view, and is not
        replicated to the manager state.

        The message will be displayed in the usual message view.

        @type  message: L{flumotion.common.messages.Message}
        """
        self.state.observe_append('messages', message)
ylatuya/Flumotion
flumotion/component/base/baseadminnode.py
Python
gpl-2.0
11,796
#!/usr/bin/env python
# Copyright (C) 2005 Bram Cohen, Copyright (C) 2005, 2006 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

#from __future__ import absolute_import

from bisect import bisect
import difflib

__all__ = ['PatienceSequenceMatcher', 'unified_diff', 'unified_diff_files']


def unique_lcs_py(a, b):
    """Find the longest common subset for unique lines.

    :param a: An indexable object (such as string or list of strings)
    :param b: Another indexable object (such as string or list of strings)
    :return: A list of tuples, one for each line which is matched.
            [(line_in_a, line_in_b), ...]

    This only matches lines which are unique on both sides.
    This helps prevent common lines from over influencing match
    results.
    The longest common subset uses the Patience Sorting algorithm:
    http://en.wikipedia.org/wiki/Patience_sorting
    """
    # set index[line in a] = position of line in a unless
    # a is a duplicate, in which case it's set to None
    index = {}
    for i in xrange(len(a)):
        line = a[i]
        if line in index:
            index[line] = None
        else:
            index[line] = i
    # make btoa[i] = position of line i in a, unless
    # that line doesn't occur exactly once in both,
    # in which case it's set to None
    btoa = [None] * len(b)
    index2 = {}
    for pos, line in enumerate(b):
        next = index.get(line)
        if next is not None:
            if line in index2:
                # unset the previous mapping, which we now know to
                # be invalid because the line isn't unique
                btoa[index2[line]] = None
                del index[line]
            else:
                index2[line] = pos
                btoa[pos] = next
    # this is the Patience sorting algorithm
    # see http://en.wikipedia.org/wiki/Patience_sorting
    backpointers = [None] * len(b)
    stacks = []
    lasts = []
    k = 0
    for bpos, apos in enumerate(btoa):
        if apos is None:
            continue
        # as an optimization, check if the next line comes at the end,
        # because it usually does
        if stacks and stacks[-1] < apos:
            k = len(stacks)
        # as an optimization, check if the next line comes right after
        # the previous line, because usually it does
        elif (stacks and stacks[k] < apos and
              (k == len(stacks) - 1 or stacks[k+1] > apos)):
            k += 1
        else:
            k = bisect(stacks, apos)
        if k > 0:
            backpointers[bpos] = lasts[k-1]
        if k < len(stacks):
            stacks[k] = apos
            lasts[k] = bpos
        else:
            stacks.append(apos)
            lasts.append(bpos)
    if len(lasts) == 0:
        return []
    result = []
    k = lasts[-1]
    while k is not None:
        result.append((btoa[k], k))
        k = backpointers[k]
    result.reverse()
    return result


def recurse_matches_py(a, b, alo, blo, ahi, bhi, answer, maxrecursion):
    """Find all of the matching text in the lines of a and b.

    :param a: A sequence
    :param b: Another sequence
    :param alo: The start location of a to check, typically 0
    :param blo: The start location of b to check, typically 0
    :param ahi: The maximum length of a to check, typically len(a)
    :param bhi: The maximum length of b to check, typically len(b)
    :param answer: The return array. Will be filled with tuples
                   indicating [(line_in_a, line_in_b)]
    :param maxrecursion: The maximum depth to recurse.
                         Must be a positive integer.
    :return: None, the return value is in the parameter answer, which
             should be a list
    """
    if maxrecursion < 0:
        print 'max recursion depth reached'
        # this will never happen normally, this check is to prevent DOS attacks
        return
    oldlength = len(answer)
    if alo == ahi or blo == bhi:
        return
    last_a_pos = alo-1
    last_b_pos = blo-1
    for apos, bpos in unique_lcs_py(a[alo:ahi], b[blo:bhi]):
        # recurse between lines which are unique in each file and match
        apos += alo
        bpos += blo
        # Most of the time, you will have a sequence of similar entries
        if last_a_pos+1 != apos or last_b_pos+1 != bpos:
            recurse_matches_py(a, b, last_a_pos+1, last_b_pos+1,
                               apos, bpos, answer, maxrecursion - 1)
        last_a_pos = apos
        last_b_pos = bpos
        answer.append((apos, bpos))
    if len(answer) > oldlength:
        # find matches between the last match and the end
        recurse_matches_py(a, b, last_a_pos+1, last_b_pos+1,
                           ahi, bhi, answer, maxrecursion - 1)
    elif a[alo] == b[blo]:
        # find matching lines at the very beginning
        while alo < ahi and blo < bhi and a[alo] == b[blo]:
            answer.append((alo, blo))
            alo += 1
            blo += 1
        recurse_matches_py(a, b, alo, blo,
                           ahi, bhi, answer, maxrecursion - 1)
    elif a[ahi - 1] == b[bhi - 1]:
        # find matching lines at the very end
        nahi = ahi - 1
        nbhi = bhi - 1
        while nahi > alo and nbhi > blo and a[nahi - 1] == b[nbhi - 1]:
            nahi -= 1
            nbhi -= 1
        recurse_matches_py(a, b, last_a_pos+1, last_b_pos+1,
                           nahi, nbhi, answer, maxrecursion - 1)
        for i in xrange(ahi - nahi):
            answer.append((nahi + i, nbhi + i))


def _collapse_sequences(matches):
    """Find sequences of lines.

    Given a sequence of [(line_in_a, line_in_b),]
    find regions where they both increment at the same time
    """
    answer = []
    start_a = start_b = None
    length = 0
    for i_a, i_b in matches:
        if (start_a is not None
                and (i_a == start_a + length)
                and (i_b == start_b + length)):
            length += 1
        else:
            if start_a is not None:
                answer.append((start_a, start_b, length))
            start_a = i_a
            start_b = i_b
            length = 1

    if length != 0:
        answer.append((start_a, start_b, length))

    return answer


def _check_consistency(answer):
    # For consistency sake, make sure all matches are only increasing
    next_a = -1
    next_b = -1
    for (a, b, match_len) in answer:
        if a < next_a:
            raise ValueError('Non increasing matches for a')
        if b < next_b:
            raise ValueError('Non increasing matches for b')
        next_a = a + match_len
        next_b = b + match_len


class PatienceSequenceMatcher_py(difflib.SequenceMatcher):
    """Compare a pair of sequences using longest common subset."""

    _do_check_consistency = True

    def __init__(self, isjunk=None, a='', b=''):
        if isjunk is not None:
            raise NotImplementedError('Currently we do not support'
                                      ' isjunk for sequence matching')
        difflib.SequenceMatcher.__init__(self, isjunk, a, b)

    def get_matching_blocks(self):
        """Return list of triples describing matching subsequences.

        Each triple is of the form (i, j, n), and means that
        a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
        i and in j.  The last triple is a dummy, (len(a), len(b), 0), and
        is the only triple with n==0.

        >>> s = PatienceSequenceMatcher(None, "abxcd", "abcd")
        >>> s.get_matching_blocks()
        [(0, 0, 2), (3, 2, 2), (5, 4, 0)]
        """
        # jam 20060525 This is the python 2.4.1 difflib get_matching_blocks
        # implementation which uses __helper. 2.4.3 got rid of helper for
        # doing it inline with a queue.
        # We should consider doing the same for recurse_matches

        if self.matching_blocks is not None:
            return self.matching_blocks

        matches = []
        recurse_matches_py(self.a, self.b, 0, 0,
                           len(self.a), len(self.b), matches, 10)
        # Matches now has individual line pairs of
        # line A matches line B, at the given offsets
        self.matching_blocks = _collapse_sequences(matches)
        self.matching_blocks.append((len(self.a), len(self.b), 0))
        if PatienceSequenceMatcher_py._do_check_consistency:
            if __debug__:
                _check_consistency(self.matching_blocks)

        return self.matching_blocks
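
# Usage sketch (illustrative, not part of the original module):
# PatienceSequenceMatcher_py has the difflib.SequenceMatcher interface, so
# it can be used anywhere a SequenceMatcher is expected. The expected
# output below follows from the doctest in get_matching_blocks.
#
#   a = ['a\n', 'b\n', 'x\n', 'c\n', 'd\n']
#   b = ['a\n', 'b\n', 'c\n', 'd\n']
#   matcher = PatienceSequenceMatcher_py(None, a, b)
#   print matcher.get_matching_blocks()   # [(0, 0, 2), (3, 2, 2), (5, 4, 0)]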
khertan/KhtNotes
khtnotes/merge3/_patiencediff_py.py
Python
gpl-3.0
9,146
# coding: utf-8
"""Form mixins for approvable models."""
from approval.models import ApprovedModel


class ApprovableFormMixin:
    """ModelForm mixin for monitored models."""

    def __init__(self, *args, **kwargs):
        """
        Form initializer for ApprovedModel.

        The form is initialized with the instance data fetched
        from the sandbox.
        """
        instance = kwargs.get("instance", None)
        if instance and isinstance(instance, ApprovedModel):
            instance.approval._update_source()
        super().__init__(*args, **kwargs)
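
# Usage sketch (illustrative, not part of the original module): the mixin
# goes in front of ModelForm so its __init__ runs first. 'Entry' is a
# hypothetical model assumed to subclass ApprovedModel.
#
#   from django import forms
#
#   class EntryForm(ApprovableFormMixin, forms.ModelForm):
#       class Meta:
#           model = Entry
#           fields = ['title', 'body']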
artscoop/django-approval
approval/forms/approvable.py
Python
mit
567
# -*- test-case-name: twisted.test.test_paths -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Object-oriented filesystem path representation. """ from __future__ import division, absolute_import import os import sys import errno import base64 from hashlib import sha1 from os.path import isabs, exists, normpath, abspath, splitext from os.path import basename, dirname, join as joinpath from os import listdir, utime, stat from stat import S_ISREG, S_ISDIR, S_IMODE, S_ISBLK, S_ISSOCK from stat import S_IRUSR, S_IWUSR, S_IXUSR from stat import S_IRGRP, S_IWGRP, S_IXGRP from stat import S_IROTH, S_IWOTH, S_IXOTH from zope.interface import Interface, Attribute, implementer # Please keep this as light as possible on other Twisted imports; many, many # things import this module, and it would be good if it could easily be # modified for inclusion in the standard library. --glyph from twisted.python.compat import comparable, cmp, unicode from twisted.python.deprecate import deprecated from twisted.python.runtime import platform from twisted.python.versions import Version from twisted.python.win32 import ERROR_FILE_NOT_FOUND, ERROR_PATH_NOT_FOUND from twisted.python.win32 import ERROR_INVALID_NAME, ERROR_DIRECTORY, O_BINARY from twisted.python.win32 import WindowsError from twisted.python.util import FancyEqMixin _CREATE_FLAGS = (os.O_EXCL | os.O_CREAT | os.O_RDWR | O_BINARY) def _stub_islink(path): """ Always return C{False} if the operating system does not support symlinks. @param path: A path string. @type path: L{str} @return: C{False} @rtype: L{bool} """ return False islink = getattr(os.path, 'islink', _stub_islink) randomBytes = os.urandom armor = base64.urlsafe_b64encode class IFilePath(Interface): """ File path object. A file path represents a location for a file-like-object and can be organized into a hierarchy; a file path can can children which are themselves file paths. A file path has a name which unique identifies it in the context of its parent (if it has one); a file path can not have two children with the same name. This name is referred to as the file path's "base name". A series of such names can be used to locate nested children of a file path; such a series is referred to as the child's "path", relative to the parent. In this case, each name in the path is referred to as a "path segment"; the child's base name is the segment in the path. When representing a file path as a string, a "path separator" is used to delimit the path segments within the string. For a file system path, that would be C{os.sep}. Note that the values of child names may be restricted. For example, a file system path will not allow the use of the path separator in a name, and certain names (e.g. C{"."} and C{".."}) may be reserved or have special meanings. @since: 12.1 """ sep = Attribute("The path separator to use in string representations") def child(name): """ Obtain a direct child of this file path. The child may or may not exist. @param name: the name of a child of this path. C{name} must be a direct child of this path and may not contain a path separator. @return: the child of this path with the given C{name}. @raise InsecurePath: if C{name} describes a file path that is not a direct child of this file path. """ def open(mode="r"): """ Opens this file path with the given mode. @return: a file-like object. @raise Exception: if this file path cannot be opened. """ def changed(): """ Clear any cached information about the state of this path on disk. 
""" def getsize(): """ Retrieve the size of this file in bytes. @return: the size of the file at this file path in bytes. @raise Exception: if the size cannot be obtained. """ def getModificationTime(): """ Retrieve the time of last access from this file. @return: a number of seconds from the epoch. @rtype: L{float} """ def getStatusChangeTime(): """ Retrieve the time of the last status change for this file. @return: a number of seconds from the epoch. @rtype: L{float} """ def getAccessTime(): """ Retrieve the time that this file was last accessed. @return: a number of seconds from the epoch. @rtype: L{float} """ def exists(): """ Check if this file path exists. @return: C{True} if the file at this file path exists, C{False} otherwise. @rtype: L{bool} """ def isdir(): """ Check if this file path refers to a directory. @return: C{True} if the file at this file path is a directory, C{False} otherwise. """ def isfile(): """ Check if this file path refers to a regular file. @return: C{True} if the file at this file path is a regular file, C{False} otherwise. """ def children(): """ List the children of this path object. @return: a sequence of the children of the directory at this file path. @raise Exception: if the file at this file path is not a directory. """ def basename(): """ Retrieve the final component of the file path's path (everything after the final path separator). @return: the base name of this file path. @rtype: L{str} """ def parent(): """ A file path for the directory containing the file at this file path. """ def sibling(name): """ A file path for the directory containing the file at this file path. @param name: the name of a sibling of this path. C{name} must be a direct sibling of this path and may not contain a path separator. @return: a sibling file path of this one. """ class InsecurePath(Exception): """ Error that is raised when the path provided to L{FilePath} is invalid. """ class LinkError(Exception): """ An error with symlinks - either that there are cyclical symlinks or that symlink are not supported on this platform. """ class UnlistableError(OSError): """ An exception which is used to distinguish between errors which mean 'this is not a directory you can list' and other, more catastrophic errors. This error will try to look as much like the original error as possible, while still being catchable as an independent type. @ivar originalException: the actual original exception instance, either an L{OSError} or a L{WindowsError}. """ def __init__(self, originalException): """ Create an UnlistableError exception. @param originalException: an instance of OSError. """ self.__dict__.update(originalException.__dict__) self.originalException = originalException class _WindowsUnlistableError(UnlistableError, WindowsError): """ This exception is raised on Windows, for compatibility with previous releases of FilePath where unportable programs may have done "except WindowsError:" around a call to children(). It is private because all application code may portably catch L{UnlistableError} instead. """ def _secureEnoughString(path): """ Compute a string usable as a new, temporary filename. @param path: The path that the new temporary filename should be able to be concatenated with. @return: A pseudorandom, 16 byte string for use in secure filenames. 
@rtype: the type of C{path} """ secureishString = armor(sha1(randomBytes(64)).digest())[:16] return _coerceToFilesystemEncoding(path, secureishString) class AbstractFilePath(object): """ Abstract implementation of an L{IFilePath}; must be completed by a subclass. This class primarily exists to provide common implementations of certain methods in L{IFilePath}. It is *not* a required parent class for L{IFilePath} implementations, just a useful starting point. """ def getContent(self): """ Retrieve the file-like object for this file path. """ fp = self.open() try: return fp.read() finally: fp.close() def parents(self): """ Retrieve an iterator of all the ancestors of this path. @return: an iterator of all the ancestors of this path, from the most recent (its immediate parent) to the root of its filesystem. """ path = self parent = path.parent() # root.parent() == root, so this means "are we the root" while path != parent: yield parent path = parent parent = parent.parent() def children(self): """ List the children of this path object. @raise OSError: If an error occurs while listing the directory. If the error is 'serious', meaning that the operation failed due to an access violation, exhaustion of some kind of resource (file descriptors or memory), OSError or a platform-specific variant will be raised. @raise UnlistableError: If the inability to list the directory is due to this path not existing or not being a directory, the more specific OSError subclass L{UnlistableError} is raised instead. @return: an iterable of all currently-existing children of this object. """ try: subnames = self.listdir() except WindowsError as winErrObj: # WindowsError is an OSError subclass, so if not for this clause # the OSError clause below would be handling these. Windows error # codes aren't the same as POSIX error codes, so we need to handle # them differently. # Under Python 2.5 on Windows, WindowsError has a winerror # attribute and an errno attribute. The winerror attribute is # bound to the Windows error code while the errno attribute is # bound to a translation of that code to a perhaps equivalent POSIX # error number. # Under Python 2.4 on Windows, WindowsError only has an errno # attribute. It is bound to the Windows error code. # For simplicity of code and to keep the number of paths through # this suite minimal, we grab the Windows error code under either # version. # Furthermore, attempting to use os.listdir on a non-existent path # in Python 2.4 will result in a Windows error code of # ERROR_PATH_NOT_FOUND. However, in Python 2.5, # ERROR_FILE_NOT_FOUND results instead. -exarkun winerror = getattr(winErrObj, 'winerror', winErrObj.errno) if winerror not in (ERROR_PATH_NOT_FOUND, ERROR_FILE_NOT_FOUND, ERROR_INVALID_NAME, ERROR_DIRECTORY): raise raise _WindowsUnlistableError(winErrObj) except OSError as ose: if ose.errno not in (errno.ENOENT, errno.ENOTDIR): # Other possible errors here, according to linux manpages: # EACCES, EMIFLE, ENFILE, ENOMEM. None of these seem like the # sort of thing which should be handled normally. -glyph raise raise UnlistableError(ose) return map(self.child, subnames) def walk(self, descend=None): """ Yield myself, then each of my children, and each of those children's children in turn. The optional argument C{descend} is a predicate that takes a FilePath, and determines whether or not that FilePath is traversed/descended into. It will be called with each path for which C{isdir} returns C{True}. 
If C{descend} is not specified, all directories will be traversed (including symbolic links which refer to directories). @param descend: A one-argument callable that will return True for FilePaths that should be traversed, False otherwise. @return: a generator yielding FilePath-like objects. """ yield self if self.isdir(): for c in self.children(): # we should first see if it's what we want, then we # can walk through the directory if (descend is None or descend(c)): for subc in c.walk(descend): if os.path.realpath(self.path).startswith( os.path.realpath(subc.path)): raise LinkError("Cycle in file graph.") yield subc else: yield c def sibling(self, path): """ Return a L{FilePath} with the same directory as this instance but with a basename of C{path}. @param path: The basename of the L{FilePath} to return. @type path: L{str} @return: The sibling path. @rtype: L{FilePath} """ return self.parent().child(path) def descendant(self, segments): """ Retrieve a child or child's child of this path. @param segments: A sequence of path segments as L{str} instances. @return: A L{FilePath} constructed by looking up the C{segments[0]} child of this path, the C{segments[1]} child of that path, and so on. @since: 10.2 """ path = self for name in segments: path = path.child(name) return path def segmentsFrom(self, ancestor): """ Return a list of segments between a child and its ancestor. For example, in the case of a path X representing /a/b/c/d and a path Y representing /a/b, C{Y.segmentsFrom(X)} will return C{['c', 'd']}. @param ancestor: an instance of the same class as self, ostensibly an ancestor of self. @raise: ValueError if the 'ancestor' parameter is not actually an ancestor, i.e. a path for /x/y/z is passed as an ancestor for /a/b/c/d. @return: a list of strs """ # this might be an unnecessarily inefficient implementation but it will # work on win32 and for zipfiles; later I will deterimine if the # obvious fast implemenation does the right thing too f = self p = f.parent() segments = [] while f != ancestor and p != f: segments[0:0] = [f.basename()] f = p p = p.parent() if f == ancestor and segments: return segments raise ValueError("%r not parent of %r" % (ancestor, self)) # new in 8.0 def __hash__(self): """ Hash the same as another L{FilePath} with the same path as mine. """ return hash((self.__class__, self.path)) # pending deprecation in 8.0 def getmtime(self): """ Deprecated. Use getModificationTime instead. """ return int(self.getModificationTime()) def getatime(self): """ Deprecated. Use getAccessTime instead. """ return int(self.getAccessTime()) def getctime(self): """ Deprecated. Use getStatusChangeTime instead. """ return int(self.getStatusChangeTime()) class RWX(FancyEqMixin, object): """ A class representing read/write/execute permissions for a single user category (i.e. user/owner, group, or other/world). Instantiate with three boolean values: readable? writable? executable?. @type read: C{bool} @ivar read: Whether permission to read is given @type write: C{bool} @ivar write: Whether permission to write is given @type execute: C{bool} @ivar execute: Whether permission to execute is given @since: 11.1 """ compareAttributes = ('read', 'write', 'execute') def __init__(self, readable, writable, executable): self.read = readable self.write = writable self.execute = executable def __repr__(self): return "RWX(read=%s, write=%s, execute=%s)" % ( self.read, self.write, self.execute) def shorthand(self): """ Returns a short string representing the permission bits. 
Looks like part of what is printed by command line utilities such as 'ls -l' (e.g. 'rwx') @return: The shorthand string. @rtype: L{str} """ returnval = ['r', 'w', 'x'] i = 0 for val in (self.read, self.write, self.execute): if not val: returnval[i] = '-' i += 1 return ''.join(returnval) class Permissions(FancyEqMixin, object): """ A class representing read/write/execute permissions. Instantiate with any portion of the file's mode that includes the permission bits. @type user: L{RWX} @ivar user: User/Owner permissions @type group: L{RWX} @ivar group: Group permissions @type other: L{RWX} @ivar other: Other/World permissions @since: 11.1 """ compareAttributes = ('user', 'group', 'other') def __init__(self, statModeInt): self.user, self.group, self.other = ( [RWX(*[statModeInt & bit > 0 for bit in bitGroup]) for bitGroup in [[S_IRUSR, S_IWUSR, S_IXUSR], [S_IRGRP, S_IWGRP, S_IXGRP], [S_IROTH, S_IWOTH, S_IXOTH]]] ) def __repr__(self): return "[%s | %s | %s]" % ( str(self.user), str(self.group), str(self.other)) def shorthand(self): """ Returns a short string representing the permission bits. Looks like what is printed by command line utilities such as 'ls -l' (e.g. 'rwx-wx--x') @return: The shorthand string. @rtype: L{str} """ return "".join( [x.shorthand() for x in (self.user, self.group, self.other)]) class _SpecialNoValue(object): """ An object that represents 'no value', to be used in deprecating statinfo. Please remove once statinfo is removed. """ pass def _asFilesystemBytes(path, encoding=None): """ Return C{path} as a string of L{bytes} suitable for use on this system's filesystem. @param path: The path to be made suitable. @type path: L{bytes} or L{unicode} @param encoding: The encoding to use if coercing to L{bytes}. If none is given, L{sys.getfilesystemencoding} is used. @return: L{bytes} """ if type(path) == bytes: return path else: if encoding is None: encoding = sys.getfilesystemencoding() return path.encode(encoding) def _asFilesystemText(path, encoding=None): """ Return C{path} as a string of L{unicode} suitable for use on this system's filesystem. @param path: The path to be made suitable. @type path: L{bytes} or L{unicode} @param encoding: The encoding to use if coercing to L{unicode}. If none is given, L{sys.getfilesystemencoding} is used. @return: L{unicode} """ if type(path) == unicode: return path else: if encoding is None: encoding = sys.getfilesystemencoding() return path.decode(encoding) def _coerceToFilesystemEncoding(path, newpath, encoding=None): """ Return a C{newpath} that is suitable for joining to C{path}. @param path: The path that it should be suitable for joining to. @param newpath: The new portion of the path to be coerced if needed. @param encoding: If coerced, the encoding that will be used. """ if type(path) == bytes: return _asFilesystemBytes(newpath, encoding=encoding) else: return _asFilesystemText(newpath, encoding=encoding) @comparable @implementer(IFilePath) class FilePath(AbstractFilePath): """ I am a path on the filesystem that only permits 'downwards' access. Instantiate me with a pathname (for example, FilePath('/home/myuser/public_html')) and I will attempt to only provide access to files which reside inside that path. I may be a path to a file, a directory, or a file which does not exist. The correct way to use me is to instantiate me, and then do ALL filesystem access through me. In other words, do not import the 'os' module; if you need to open a file, call my 'open' method. If you need to list a directory, call my 'path' method. 
Even if you pass me a relative path, I will convert that to an absolute path internally. Note: although time-related methods do return floating-point results, they may still be only second resolution depending on the platform and the last value passed to L{os.stat_float_times}. If you want greater-than-second precision, call C{os.stat_float_times(True)}, or use Python 2.5. Greater-than-second precision is only available in Windows on Python2.5 and later. The type of C{path} when instantiating decides the mode of the L{FilePath}. That is, C{FilePath(b"/")} will return a L{bytes} mode L{FilePath}, and C{FilePath(u"/")} will return a L{unicode} mode L{FilePath}. C{FilePath("/")} will return a L{bytes} mode L{FilePath} on Python 2, and a L{unicode} mode L{FilePath} on Python 3. Methods that return a new L{FilePath} use the type of the given subpath to decide its mode. For example, C{FilePath(b"/").child(u"tmp")} will return a L{unicode} mode L{FilePath}. @type alwaysCreate: L{bool} @ivar alwaysCreate: When opening this file, only succeed if the file does not already exist. @type path: L{bytes} or L{unicode} @ivar path: The path from which 'downward' traversal is permitted. @ivar statinfo: (WARNING: statinfo is deprecated as of Twisted 15.0.0 and will become a private attribute) The currently cached status information about the file on the filesystem that this L{FilePath} points to. This attribute is C{None} if the file is in an indeterminate state (either this L{FilePath} has not yet had cause to call C{stat()} yet or L{FilePath.changed} indicated that new information is required), 0 if C{stat()} was called and returned an error (i.e. the path did not exist when C{stat()} was called), or a C{stat_result} object that describes the last known status of the underlying file (or directory, as the case may be). Trust me when I tell you that you do not want to use this attribute. Instead, use the methods on L{FilePath} which give you information about it, like C{getsize()}, C{isdir()}, C{getModificationTime()}, and so on. @type statinfo: L{int} or L{types.NoneType} or L{os.stat_result} """ _statinfo = None path = None def __init__(self, path, alwaysCreate=False): """ Convert a path string to an absolute path if necessary and initialize the L{FilePath} with the result. """ self.path = abspath(path) self.alwaysCreate = alwaysCreate def __getstate__(self): """ Support serialization by discarding cached L{os.stat} results and returning everything else. """ d = self.__dict__.copy() if '_statinfo' in d: del d['_statinfo'] return d @property def sep(self): """ Return a filesystem separator. @return: The native filesystem separator. @returntype: The same type as C{self.path}. """ return _coerceToFilesystemEncoding(self.path, os.sep) def _asBytesPath(self, encoding=None): """ Return the path of this L{FilePath} as bytes. @param encoding: The encoding to use if coercing to L{bytes}. If none is given, L{sys.getfilesystemencoding} is used. @return: L{bytes} """ return _asFilesystemBytes(self.path, encoding=encoding) def _asTextPath(self, encoding=None): """ Return the path of this L{FilePath} as text. @param encoding: The encoding to use if coercing to L{unicode}. If none is given, L{sys.getfilesystemencoding} is used. @return: L{unicode} """ return _asFilesystemText(self.path, encoding=encoding) def asBytesMode(self, encoding=None): """ Return this L{FilePath} in L{bytes}-mode. @param encoding: The encoding to use if coercing to L{bytes}. If none is given, L{sys.getfilesystemencoding} is used. 
@return: L{bytes} mode L{FilePath} """ if type(self.path) == unicode: return self.clonePath(self._asBytesPath(encoding=encoding)) return self def asTextMode(self, encoding=None): """ Return this L{FilePath} in L{unicode}-mode. @param encoding: The encoding to use if coercing to L{unicode}. If none is given, L{sys.getfilesystemencoding} is used. @return: L{unicode} mode L{FilePath} """ if type(self.path) == bytes: return self.clonePath(self._asTextPath(encoding=encoding)) return self def _getPathAsSameTypeAs(self, pattern): """ If C{pattern} is C{bytes}, return L{FilePath.path} as L{bytes}. Otherwise, return L{FilePath.path} as L{unicode}. @param pattern: The new element of the path that L{FilePath.path} may need to be coerced to match. """ if type(pattern) == bytes: return self._asBytesPath() else: return self._asTextPath() def child(self, path): """ Create and return a new L{FilePath} representing a path contained by C{self}. @param path: The base name of the new L{FilePath}. If this contains directory separators or parent references it will be rejected. @type path: L{bytes} or L{unicode} @raise InsecurePath: If the result of combining this path with C{path} would result in a path which is not a direct child of this path. @return: The child path. @rtype: L{FilePath} with a mode equal to the type of C{path}. """ colon = _coerceToFilesystemEncoding(path, ":") sep = _coerceToFilesystemEncoding(path, os.sep) ourPath = self._getPathAsSameTypeAs(path) if platform.isWindows() and path.count(colon): # Catch paths like C:blah that don't have a slash raise InsecurePath("%r contains a colon." % (path,)) norm = normpath(path) if sep in norm: raise InsecurePath("%r contains one or more directory separators" % (path,)) newpath = abspath(joinpath(ourPath, norm)) if not newpath.startswith(ourPath): raise InsecurePath("%r is not a child of %s" % (newpath, ourPath)) return self.clonePath(newpath) def preauthChild(self, path): """ Use me if C{path} might have slashes in it, but you know they're safe. @param path: A relative path (ie, a path not starting with C{"/"}) which will be interpreted as a child or descendant of this path. @type path: L{bytes} or L{unicode} @return: The child path. @rtype: L{FilePath} with a mode equal to the type of C{path}. """ ourPath = self._getPathAsSameTypeAs(path) newpath = abspath(joinpath(ourPath, normpath(path))) if not newpath.startswith(ourPath): raise InsecurePath("%s is not a child of %s" % (newpath, ourPath)) return self.clonePath(newpath) def childSearchPreauth(self, *paths): """ Return my first existing child with a name in C{paths}. C{paths} is expected to be a list of *pre-secured* path fragments; in most cases this will be specified by a system administrator and not an arbitrary user. If no appropriately-named children exist, this will return C{None}. @return: C{None} or the child path. @rtype: L{types.NoneType} or L{FilePath} """ for child in paths: p = self._getPathAsSameTypeAs(child) jp = joinpath(p, child) if exists(jp): return self.clonePath(jp) def siblingExtensionSearch(self, *exts): """ Attempt to return a path with my name, given multiple possible extensions. Each extension in C{exts} will be tested and the first path which exists will be returned. If no path exists, C{None} will be returned. If C{''} is in C{exts}, then if the file referred to by this path exists, C{self} will be returned. The extension '*' has a magic meaning, which means "any path that begins with C{self.path + '.'} is acceptable". 
""" for ext in exts: if not ext and self.exists(): return self p = self._getPathAsSameTypeAs(ext) star = _coerceToFilesystemEncoding(ext, "*") dot = _coerceToFilesystemEncoding(ext, ".") if ext == star: basedot = basename(p) + dot for fn in listdir(dirname(p)): if fn.startswith(basedot): return self.clonePath(joinpath(dirname(p), fn)) p2 = p + ext if exists(p2): return self.clonePath(p2) def realpath(self): """ Returns the absolute target as a L{FilePath} if self is a link, self otherwise. The absolute link is the ultimate file or directory the link refers to (for instance, if the link refers to another link, and another...). If the filesystem does not support symlinks, or if the link is cyclical, raises a L{LinkError}. Behaves like L{os.path.realpath} in that it does not resolve link names in the middle (ex. /x/y/z, y is a link to w - realpath on z will return /x/y/z, not /x/w/z). @return: L{FilePath} of the target path. @rtype: L{FilePath} @raises LinkError: if links are not supported or links are cyclical. """ if self.islink(): result = os.path.realpath(self.path) if result == self.path: raise LinkError("Cyclical link - will loop forever") return self.clonePath(result) return self def siblingExtension(self, ext): """ Attempt to return a path with my name, given the extension at C{ext}. @param ext: File-extension to search for. @type ext: L{bytes} or L{unicode} @return: The sibling path. @rtype: L{FilePath} with the same mode as the type of C{ext}. """ ourPath = self._getPathAsSameTypeAs(ext) return self.clonePath(ourPath + ext) def linkTo(self, linkFilePath): """ Creates a symlink to self to at the path in the L{FilePath} C{linkFilePath}. Only works on posix systems due to its dependence on L{os.symlink}. Propagates L{OSError}s up from L{os.symlink} if C{linkFilePath.parent()} does not exist, or C{linkFilePath} already exists. @param linkFilePath: a FilePath representing the link to be created. @type linkFilePath: L{FilePath} """ os.symlink(self.path, linkFilePath.path) def open(self, mode='r'): """ Open this file using C{mode} or for writing if C{alwaysCreate} is C{True}. In all cases the file is opened in binary mode, so it is not necessary to include C{"b"} in C{mode}. @param mode: The mode to open the file in. Default is C{"r"}. @type mode: L{str} @raises AssertionError: If C{"a"} is included in the mode and C{alwaysCreate} is C{True}. @rtype: L{file} @return: An open L{file} object. """ if self.alwaysCreate: assert 'a' not in mode, ("Appending not supported when " "alwaysCreate == True") return self.create() # This hack is necessary because of a bug in Python 2.7 on Windows: # http://bugs.python.org/issue7686 mode = mode.replace('b', '') return open(self.path, mode + 'b') # stat methods below def restat(self, reraise=True): """ Re-calculate cached effects of 'stat'. To refresh information on this path after you know the filesystem may have changed, call this method. @param reraise: a boolean. If true, re-raise exceptions from L{os.stat}; otherwise, mark this path as not existing, and remove any cached stat information. @raise Exception: If C{reraise} is C{True} and an exception occurs while reloading metadata. """ try: self._statinfo = stat(self.path) except OSError: self._statinfo = 0 if reraise: raise def changed(self): """ Clear any cached information about the state of this path on disk. @since: 10.1.0 """ self._statinfo = None def chmod(self, mode): """ Changes the permissions on self, if possible. Propagates errors from L{os.chmod} up. 
        @param mode: integer representing the new permissions desired (same
            as the command line chmod)
        @type mode: L{int}
        """
        os.chmod(self.path, mode)


    def getsize(self):
        """
        Retrieve the size of this file in bytes.

        @return: The size of the file at this file path in bytes.
        @raise Exception: if the size cannot be obtained.
        @rtype: L{int}
        """
        st = self._statinfo
        if not st:
            self.restat()
            st = self._statinfo
        return st.st_size


    def getModificationTime(self):
        """
        Retrieve the time that this file was last modified.

        @return: a number of seconds from the epoch.
        @rtype: L{float}
        """
        st = self._statinfo
        if not st:
            self.restat()
            st = self._statinfo
        return float(st.st_mtime)


    def getStatusChangeTime(self):
        """
        Retrieve the time of the last status change for this file.

        @return: a number of seconds from the epoch.
        @rtype: L{float}
        """
        st = self._statinfo
        if not st:
            self.restat()
            st = self._statinfo
        return float(st.st_ctime)


    def getAccessTime(self):
        """
        Retrieve the time that this file was last accessed.

        @return: a number of seconds from the epoch.
        @rtype: L{float}
        """
        st = self._statinfo
        if not st:
            self.restat()
            st = self._statinfo
        return float(st.st_atime)


    def getInodeNumber(self):
        """
        Retrieve the file serial number, also called inode number, which
        distinguishes this file from all other files on the same device.

        @raise NotImplementedError: if the platform is Windows, since the
            inode number would be a dummy value for all files in Windows
        @return: a number representing the file serial number
        @rtype: L{int}
        @since: 11.0
        """
        if platform.isWindows():
            raise NotImplementedError

        st = self._statinfo
        if not st:
            self.restat()
            st = self._statinfo
        return st.st_ino


    def getDevice(self):
        """
        Retrieves the device containing the file.  The inode number and
        device number together uniquely identify the file, but the device
        number is not necessarily consistent across reboots or system
        crashes.

        @raise NotImplementedError: if the platform is Windows, since the
            device number would be 0 for all partitions on a Windows platform
        @return: a number representing the device
        @rtype: L{int}
        @since: 11.0
        """
        if platform.isWindows():
            raise NotImplementedError

        st = self._statinfo
        if not st:
            self.restat()
            st = self._statinfo
        return st.st_dev


    def getNumberOfHardLinks(self):
        """
        Retrieves the number of hard links to the file.

        This count keeps track of how many directories have entries for this
        file.  If the count is ever decremented to zero then the file itself
        is discarded as soon as no process still holds it open.  Symbolic
        links are not counted in the total.

        @raise NotImplementedError: if the platform is Windows, since Windows
            doesn't maintain a link count for directories, and L{os.stat}
            does not set C{st_nlink} on Windows anyway.
        @return: the number of hard links to the file
        @rtype: L{int}
        @since: 11.0
        """
        if platform.isWindows():
            raise NotImplementedError

        st = self._statinfo
        if not st:
            self.restat()
            st = self._statinfo
        return st.st_nlink


    def getUserID(self):
        """
        Returns the user ID of the file's owner.

        @raise NotImplementedError: if the platform is Windows, since the UID
            is always 0 on Windows
        @return: the user ID of the file's owner
        @rtype: L{int}
        @since: 11.0
        """
        if platform.isWindows():
            raise NotImplementedError

        st = self._statinfo
        if not st:
            self.restat()
            st = self._statinfo
        return st.st_uid


    def getGroupID(self):
        """
        Returns the group ID of the file.
@raise NotImplementedError: if the platform is Windows, since the GID is always 0 on windows @return: the group ID of the file @rtype: L{int} @since: 11.0 """ if platform.isWindows(): raise NotImplementedError st = self._statinfo if not st: self.restat() st = self._statinfo return st.st_gid def getPermissions(self): """ Returns the permissions of the file. Should also work on Windows, however, those permissions may not be what is expected in Windows. @return: the permissions for the file @rtype: L{Permissions} @since: 11.1 """ st = self._statinfo if not st: self.restat() st = self._statinfo return Permissions(S_IMODE(st.st_mode)) def exists(self): """ Check if this L{FilePath} exists. @return: C{True} if the stats of C{path} can be retrieved successfully, C{False} in the other cases. @rtype: L{bool} """ if self._statinfo: return True else: self.restat(False) if self._statinfo: return True else: return False def isdir(self): """ Check if this L{FilePath} refers to a directory. @return: C{True} if this L{FilePath} refers to a directory, C{False} otherwise. @rtype: L{bool} """ st = self._statinfo if not st: self.restat(False) st = self._statinfo if not st: return False return S_ISDIR(st.st_mode) def isfile(self): """ Check if this file path refers to a regular file. @return: C{True} if this L{FilePath} points to a regular file (not a directory, socket, named pipe, etc), C{False} otherwise. @rtype: L{bool} """ st = self._statinfo if not st: self.restat(False) st = self._statinfo if not st: return False return S_ISREG(st.st_mode) def isBlockDevice(self): """ Returns whether the underlying path is a block device. @return: C{True} if it is a block device, C{False} otherwise @rtype: L{bool} @since: 11.1 """ st = self._statinfo if not st: self.restat(False) st = self._statinfo if not st: return False return S_ISBLK(st.st_mode) def isSocket(self): """ Returns whether the underlying path is a socket. @return: C{True} if it is a socket, C{False} otherwise @rtype: L{bool} @since: 11.1 """ st = self._statinfo if not st: self.restat(False) st = self._statinfo if not st: return False return S_ISSOCK(st.st_mode) def islink(self): """ Check if this L{FilePath} points to a symbolic link. @return: C{True} if this L{FilePath} points to a symbolic link, C{False} otherwise. @rtype: L{bool} """ # We can't use cached stat results here, because that is the stat of # the destination - (see #1773) which in *every case* but this one is # the right thing to use. We could call lstat here and use that, but # it seems unlikely we'd actually save any work that way. -glyph return islink(self.path) def isabs(self): """ Check if this L{FilePath} refers to an absolute path. This always returns C{True}. @return: C{True}, always. @rtype: L{bool} """ return isabs(self.path) def listdir(self): """ List the base names of the direct children of this L{FilePath}. @return: A L{list} of L{bytes}/L{unicode} giving the names of the contents of the directory this L{FilePath} refers to. These names are relative to this L{FilePath}. @rtype: L{list} @raise: Anything the platform L{os.listdir} implementation might raise (typically L{OSError}). """ return listdir(self.path) def splitext(self): """ Split the file path into a pair C{(root, ext)} such that C{root + ext == path}. @return: Tuple where the first item is the filename and second item is the file extension. See Python docs for L{os.path.splitext}. 
@rtype: L{tuple} """ return splitext(self.path) def __repr__(self): return 'FilePath(%r)' % (self.path,) def touch(self): """ Updates the access and last modification times of the file at this file path to the current time. Also creates the file if it does not already exist. @raise Exception: if unable to create or modify the last modification time of the file. """ try: self.open('a').close() except IOError: pass utime(self.path, None) def remove(self): """ Removes the file or directory that is represented by self. If C{self.path} is a directory, recursively remove all its children before removing the directory. If it's a file or link, just delete it. """ if self.isdir() and not self.islink(): for child in self.children(): child.remove() os.rmdir(self.path) else: os.remove(self.path) self.changed() def makedirs(self, ignoreExistingDirectory=False): """ Create all directories not yet existing in C{path} segments, using L{os.makedirs}. @param ignoreExistingDirectory: Don't raise L{OSError} if directory already exists. @type ignoreExistingDirectory: L{bool} @return: C{None} """ try: return os.makedirs(self.path) except OSError as e: if not ( e.errno == errno.EEXIST and ignoreExistingDirectory and self.isdir()): raise def globChildren(self, pattern): """ Assuming I am representing a directory, return a list of FilePaths representing my children that match the given pattern. @param pattern: A glob pattern to use to match child paths. @type pattern: L{unicode} or L{bytes} @return: A L{list} of matching children. @rtype: L{list} of L{FilePath}, with the mode of C{pattern}'s type """ sep = _coerceToFilesystemEncoding(pattern, os.sep) ourPath = self._getPathAsSameTypeAs(pattern) import glob path = ourPath[-1] == sep and ourPath + pattern \ or sep.join([ourPath, pattern]) return list(map(self.clonePath, glob.glob(path))) def basename(self): """ Retrieve the final component of the file path's path (everything after the final path separator). @return: The final component of the L{FilePath}'s path (Everything after the final path separator). @rtype: the same type as this L{FilePath}'s C{path} attribute """ return basename(self.path) def dirname(self): """ Retrieve all of the components of the L{FilePath}'s path except the last one (everything up to the final path separator). @return: All of the components of the L{FilePath}'s path except the last one (everything up to the final path separator). @rtype: the same type as this L{FilePath}'s C{path} attribute """ return dirname(self.path) def parent(self): """ A file path for the directory containing the file at this file path. @return: A L{FilePath} representing the path which directly contains this L{FilePath}. @rtype: L{FilePath} """ return self.clonePath(self.dirname()) def setContent(self, content, ext=b'.new'): """ Replace the file at this path with a new file that contains the given bytes, trying to avoid data-loss in the meanwhile. On UNIX-like platforms, this method does its best to ensure that by the time this method returns, either the old contents I{or} the new contents of the file will be present at this path for subsequent readers regardless of premature device removal, program crash, or power loss, making the following assumptions: - your filesystem is journaled (i.e. 
          your filesystem will not I{itself} lose data due to power loss)

        - your filesystem's C{rename()} is atomic

        - your filesystem will not discard new data while preserving new
          metadata (see U{http://mjg59.livejournal.com/108257.html} for more
          detail)

        On most versions of Windows there is no atomic C{rename()} (see
        U{http://bit.ly/win32-overwrite} for more information), so this
        method is slightly less helpful.  There is a small window where the
        file at this path may be deleted before the new file is moved to
        replace it: however, the new file will be fully written and flushed
        beforehand so in the unlikely event that there is a crash at that
        point, it should be possible for the user to manually recover the new
        version of their data.  In the future, Twisted will support atomic
        file moves on those versions of Windows which I{do} support them: see
        U{Twisted ticket 3004<http://twistedmatrix.com/trac/ticket/3004>}.

        This method should be safe for use by multiple concurrent processes,
        but note that it is not easy to predict which process's contents will
        ultimately end up on disk if they invoke this method at close to the
        same time.

        @param content: The desired contents of the file at this path.
        @type content: L{bytes}

        @param ext: An extension to append to the temporary filename used to
            store the bytes while they are being written.  This can be used
            to make sure that temporary files can be identified by their
            suffix, for cleanup in case of crashes.
        @type ext: L{bytes}
        """
        sib = self.temporarySibling(ext)
        f = sib.open('w')
        try:
            f.write(content)
        finally:
            f.close()
        if platform.isWindows() and exists(self.path):
            os.unlink(self.path)
        os.rename(sib.path, self.path)


    def __cmp__(self, other):
        if not isinstance(other, FilePath):
            return NotImplemented
        return cmp(self.path, other.path)


    def createDirectory(self):
        """
        Create the directory the L{FilePath} refers to.

        @see: L{makedirs}

        @raise OSError: If the directory cannot be created.
        """
        os.mkdir(self.path)


    def requireCreate(self, val=1):
        """
        Sets the C{alwaysCreate} variable.

        @param val: C{True} or C{False}, indicating whether opening this path
            will be required to create the file or not.
        @type val: L{bool}

        @return: C{None}
        """
        self.alwaysCreate = val


    def create(self):
        """
        Exclusively create a file, only if this file previously did not
        exist.

        @return: A file-like object opened from this path.
        """
        fdint = os.open(self.path, _CREATE_FLAGS)

        # XXX TODO: 'name' attribute of returned files is not mutable or
        # settable via fdopen, so this file is slightly less functional than
        # the one returned from 'open' by default.  send a patch to Python...
        return os.fdopen(fdint, 'w+b')


    def temporarySibling(self, extension=b""):
        """
        Construct a path referring to a sibling of this path.

        The resulting path will be unpredictable, so that other subprocesses
        should neither accidentally attempt to refer to the same path before
        it is created, nor should other processes be able to guess its name
        in advance.

        @param extension: A suffix to append to the created filename.  (Note
            that if you want an extension with a '.' you must include the '.'
            yourself.)
        @type extension: L{bytes} or L{unicode}

        @return: a path object with the given extension suffix,
            C{alwaysCreate} set to True.
@rtype: L{FilePath} with a mode equal to the type of C{extension} """ ourPath = self._getPathAsSameTypeAs(extension) sib = self.sibling(_secureEnoughString(ourPath) + self.clonePath(ourPath).basename() + extension) sib.requireCreate() return sib _chunkSize = 2 ** 2 ** 2 ** 2 def copyTo(self, destination, followLinks=True): """ Copies self to destination. If self doesn't exist, an OSError is raised. If self is a directory, this method copies its children (but not itself) recursively to destination - if destination does not exist as a directory, this method creates it. If destination is a file, an IOError will be raised. If self is a file, this method copies it to destination. If destination is a file, this method overwrites it. If destination is a directory, an IOError will be raised. If self is a link (and followLinks is False), self will be copied over as a new symlink with the same target as returned by os.readlink. That means that if it is absolute, both the old and new symlink will link to the same thing. If it's relative, then perhaps not (and it's also possible that this relative link will be broken). File/directory permissions and ownership will NOT be copied over. If followLinks is True, symlinks are followed so that they're treated as their targets. In other words, if self is a link, the link's target will be copied. If destination is a link, self will be copied to the destination's target (the actual destination will be destination's target). Symlinks under self (if self is a directory) will be followed and its target's children be copied recursively. If followLinks is False, symlinks will be copied over as symlinks. @param destination: the destination (a FilePath) to which self should be copied @param followLinks: whether symlinks in self should be treated as links or as their targets """ if self.islink() and not followLinks: os.symlink(os.readlink(self.path), destination.path) return # XXX TODO: *thorough* audit and documentation of the exact desired # semantics of this code. Right now the behavior of existent # destination symlinks is convenient, and quite possibly correct, but # its security properties need to be explained. if self.isdir(): if not destination.exists(): destination.createDirectory() for child in self.children(): destChild = destination.child(child.basename()) child.copyTo(destChild, followLinks) elif self.isfile(): writefile = destination.open('w') try: readfile = self.open() try: while 1: # XXX TODO: optionally use os.open, os.read and # O_DIRECT and use os.fstatvfs to determine chunk sizes # and make *****sure**** copy is page-atomic; the # following is good enough for 99.9% of everybody and # won't take a week to audit though. chunk = readfile.read(self._chunkSize) writefile.write(chunk) if len(chunk) < self._chunkSize: break finally: readfile.close() finally: writefile.close() elif not self.exists(): raise OSError(errno.ENOENT, "No such file or directory") else: # If you see the following message because you want to copy # symlinks, fifos, block devices, character devices, or unix # sockets, please feel free to add support to do sensible things in # reaction to those types! raise NotImplementedError( "Only copying of files and directories supported") def moveTo(self, destination, followLinks=True): """ Move self to destination - basically renaming self to whatever destination is named. If destination is an already-existing directory, moves all children to destination if destination is empty. 
If destination is a non-empty directory, or destination is a file, an OSError will be raised. If moving between filesystems, self needs to be copied, and everything that applies to copyTo applies to moveTo. @param destination: the destination (a FilePath) to which self should be copied @param followLinks: whether symlinks in self should be treated as links or as their targets (only applicable when moving between filesystems) """ try: os.rename(self.path, destination.path) except OSError as ose: if ose.errno == errno.EXDEV: # man 2 rename, ubuntu linux 5.10 "breezy": # oldpath and newpath are not on the same mounted filesystem. # (Linux permits a filesystem to be mounted at multiple # points, but rename(2) does not work across different mount # points, even if the same filesystem is mounted on both.) # that means it's time to copy trees of directories! secsib = destination.temporarySibling() self.copyTo(secsib, followLinks) # slow secsib.moveTo(destination, followLinks) # visible # done creating new stuff. let's clean me up. mysecsib = self.temporarySibling() self.moveTo(mysecsib, followLinks) # visible mysecsib.remove() # slow else: raise else: self.changed() destination.changed() def statinfo(self, value=_SpecialNoValue): """ FilePath.statinfo is deprecated. @param value: value to set statinfo to, if setting a value @return: C{_statinfo} if getting, C{None} if setting """ # This is a pretty awful hack to use the deprecated decorator to # deprecate a class attribute. Ideally, there would just be a # statinfo property and a statinfo property setter, but the # 'deprecated' decorator does not produce the correct FQDN on class # methods. So the property stuff needs to be set outside the class # definition - but the getter and setter both need the same function # in order for the 'deprecated' decorator to produce the right # deprecation string. if value is _SpecialNoValue: return self._statinfo else: self._statinfo = value # This is all a terrible hack to get statinfo deprecated _tmp = deprecated( Version('Twisted', 15, 0, 0), "other FilePath methods such as getsize(), " "isdir(), getModificationTime(), etc.")(FilePath.statinfo) FilePath.statinfo = property(_tmp, _tmp) FilePath.clonePath = FilePath
Architektor/PySnip
venv/lib/python2.7/site-packages/twisted/python/filepath.py
Python
gpl-3.0
58,621
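The FilePath methods above (child(), setContent(), temporarySibling(), makedirs()) are designed to compose safely. A minimal usage sketch of that public API follows; the /tmp directory name is illustrative only, not taken from the source file.

# Minimal sketch of the FilePath API shown above.
from twisted.python.filepath import FilePath, InsecurePath

base = FilePath('/tmp/fp-demo')
base.makedirs(ignoreExistingDirectory=True)

# child() rejects segments that would escape the parent directory.
try:
    base.child('../etc/passwd')
except InsecurePath:
    print('escape attempt rejected')

# setContent() writes to a temporarySibling() and renames it into place,
# so concurrent readers see either the old contents or the new.
note = base.child('note.txt')
note.setContent(b'hello')
print(note.getContent())   # b'hello'
print(note.getsize())      # 5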
import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec from matplotlib.lines import Line2D from seispy.rfcorrect import SACStation from seispy.rf import CfgParser import argparse import numpy as np from os.path import join, realpath, basename, exists import sys def init_figure(): h = plt.figure(figsize=(11.7, 8.3)) gs = GridSpec(17, 3) gs.update(wspace=0.25) axr_sum = plt.subplot(gs[0, 0]) axr_sum.grid(color='gray', linestyle='--', linewidth=0.4, axis='x') axr = plt.subplot(gs[1:, 0]) axr.grid(color='gray', linestyle='--', linewidth=0.4, axis='x') axt_sum = plt.subplot(gs[0, 1]) axt_sum.grid(color='gray', linestyle='--', linewidth=0.4, axis='x') axt = plt.subplot(gs[1:, 1]) axt.grid(color='gray', linestyle='--', linewidth=0.4, axis='x') axb = plt.subplot(gs[1:, 2]) axb.grid(color='gray', linestyle='--', linewidth=0.4, axis='x') return h, axr, axt, axb, axr_sum, axt_sum def read_process_data(path, resamp_dt=0.1): stadata = SACStation(path) stadata.resample(resamp_dt) idx = np.argsort(stadata.bazi) stadata.event = stadata.event[idx] stadata.bazi = stadata.bazi[idx] stadata.datar = stadata.datar[idx] stadata.datat = stadata.datat[idx] return stadata def plot_waves(axr, axt, axb, axr_sum, axt_sum, stadata, enf=3): bound = np.zeros(stadata.rflength) for i in range(stadata.ev_num): datar = stadata.datar[i] * enf + (i + 1) datat = stadata.datat[i] * enf + (i + 1) # axr.plot(time_axis, stadata.datar[i], linewidth=0.2, color='black') axr.fill_between(stadata.time_axis, datar, bound + i+1, where=datar > i+1, facecolor='red', alpha=0.7) axr.fill_between(stadata.time_axis, datar, bound + i+1, where=datar < i+1, facecolor='blue', alpha=0.7) # axt.plot(time_axis, stadata.datat[i], linewidth=0.2, color='black') axt.fill_between(stadata.time_axis, datat, bound + i + 1, where=datat > i+1, facecolor='red', alpha=0.7) axt.fill_between(stadata.time_axis, datat, bound + i + 1, where=datat < i+1, facecolor='blue', alpha=0.7) datar = np.mean(stadata.datar, axis=0) datar /= np.max(datar) datat = np.mean(stadata.datat, axis=0) datat /= np.max(datar) axr_sum.fill_between(stadata.time_axis, datar, bound, where=datar > 0, facecolor='red', alpha=0.7) axr_sum.fill_between(stadata.time_axis, datar, bound, where=datar < 0, facecolor='blue', alpha=0.7) axt_sum.fill_between(stadata.time_axis, datat, bound, where=datat > 0, facecolor='red', alpha=0.7) axt_sum.fill_between(stadata.time_axis, datat, bound, where=datat < 0, facecolor='blue', alpha=0.7) axb.scatter(stadata.bazi, np.arange(stadata.ev_num) + 1, s=7) def set_fig(axr, axt, axb, axr_sum, axt_sum, stadata, station, xmin=-2, xmax=30, comp='R'): y_range = np.arange(stadata.ev_num) + 1 x_range = np.arange(0, xmax+2, 2) space = 2 # set axr axr.set_xlim(xmin, xmax) axr.set_xticks(x_range) axr.set_xticklabels(x_range, fontsize=8) axr.set_ylim(0, stadata.ev_num + space) axr.set_yticks(y_range) axr.set_yticklabels(stadata.event, fontsize=5) axr.set_xlabel('Time after P (s)', fontsize=13) axr.set_ylabel('Event', fontsize=13) axr.add_line(Line2D([0, 0], axr.get_ylim(), color='black')) # set axr_sum axr_sum.set_title('{} components ({})'.format(comp, station), fontsize=16) axr_sum.set_xlim(xmin, xmax) axr_sum.set_xticks(x_range) axr_sum.set_xticklabels([]) axr_sum.set_ylim(-0.5, 1.25) axr_sum.set_yticks([0.375]) axr_sum.set_yticklabels(['Sum'], fontsize=8) axr_sum.tick_params(axis='y', left=False) axr_sum.add_line(Line2D([0, 0], axr_sum.get_ylim(), color='black')) # set axt axt.set_xlim(xmin, xmax) axt.set_xticks(x_range) axt.set_xticklabels(x_range, 
                            fontsize=8)
    axt.set_ylim(0, stadata.ev_num + space)
    axt.set_yticks(y_range)
    bazi = ['{:.1f}'.format(ba) for ba in stadata.bazi]
    axt.set_yticklabels(bazi, fontsize=5)
    axt.set_xlabel('Time after P (s)', fontsize=13)
    axt.set_ylabel(r'Back-azimuth ($\circ$)', fontsize=13)
    axt.add_line(Line2D([0, 0], axt.get_ylim(), color='black'))

    # set axt_sum
    axt_sum.set_title('T components ({})'.format(station), fontsize=16)
    axt_sum.set_xlim(xmin, xmax)
    axt_sum.set_xticks(x_range)
    axt_sum.set_xticklabels([])
    axt_sum.set_ylim(-0.5, 1.25)
    axt_sum.set_yticks([0.375])
    axt_sum.set_yticklabels(['Sum'], fontsize=8)
    axt_sum.tick_params(axis='y', left=False)
    axt_sum.add_line(Line2D([0, 0], axt_sum.get_ylim(), color='black'))

    # set axb
    axb.set_xlim(0, 360)
    axb.set_xticks(np.linspace(0, 360, 7))
    axb.set_xticklabels(np.linspace(0, 360, 7, dtype='i'), fontsize=8)
    axb.set_ylim(0, stadata.ev_num + space)
    axb.set_yticks(y_range)
    axb.set_yticklabels(y_range, fontsize=5)
    axb.set_xlabel(r'Back-azimuth ($\circ$)', fontsize=13)


def plotrt(rfpath, enf=3, out_path='./', outformat='g', xmax=30):
    """Plot PRFs with R and T components

    :param rfpath: Path to PRFs
    :type rfpath: str
    :param enf: The enlarge factor, defaults to 3
    :type enf: int, optional
    :param out_path: The output path, defaults to current directory
    :type out_path: str, optional
    :param outformat: File format of the image file, g as 'png', f as 'pdf',
                      defaults to 'g'
    :type outformat: str, optional
    :param xmax: The maximum time in seconds shown on the x axis, defaults to 30
    :type xmax: float, optional
    """
    station = basename(rfpath)
    lst = join(rfpath, station+'finallist.dat')
    if not exists(lst):
        raise FileNotFoundError('No such final list as {}'.format(lst))
    if not exists(out_path):
        raise FileNotFoundError('The output path {} does not exist'.format(out_path))
    h, axr, axt, axb, axr_sum, axt_sum = init_figure()
    stadata = read_process_data(rfpath)
    plot_waves(axr, axt, axb, axr_sum, axt_sum, stadata, enf=enf)
    set_fig(axr, axt, axb, axr_sum, axt_sum, stadata, station, xmax=xmax, comp=stadata.comp)
    if outformat == 'g':
        h.savefig(join(out_path, station+'_RT_bazorder_{:.1f}.png'.format(stadata.f0[0])),
                  dpi=400, bbox_inches='tight')
    elif outformat == 'f':
        h.savefig(join(out_path, station+'_RT_bazorder_{:.1f}.pdf'.format(stadata.f0[0])),
                  format='pdf', bbox_inches='tight')


def main():
    parser = argparse.ArgumentParser(description="Plot R(Q)&T components for P receiver functions (PRFs)")
    parser.add_argument('rfpath', help='Path to PRFs with a \'finallist.dat\' in it',
                        type=str, default=None)
    parser.add_argument('-e', help='Enlargement factor, defaults to 3',
                        dest='enf', type=float, default=3, metavar='enf')
    parser.add_argument('-o', help='Output path without file name, defaults to current path',
                        dest='output', default='./', type=str, metavar='outpath')
    parser.add_argument('-t', help='Specify figure format. f = \'.pdf\', g = \'.png\', defaults to \'g\'',
                        dest='format', default='g', type=str, metavar='f|g')
    parser.add_argument('-x', help='The max time scale in sec, defaults to 30s',
                        default=30, type=float, metavar='max_time')
    arg = parser.parse_args()
    if arg.format not in ('f', 'g'):
        raise ValueError('Error: The format must be \'f\' or \'g\'')
    plotrt(rfpath=arg.rfpath, enf=arg.enf, out_path=arg.output,
           outformat=arg.format, xmax=arg.x)


if __name__ == '__main__':
    # plotrt('XHL01')
    pass
xumi1993/seispy
seispy/plotRT.py
Python
gpl-3.0
7,501
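plotrt() above is the programmatic entry point behind the command-line parser. A hedged invocation sketch follows; the station directory name is hypothetical, and per the checks in the function it must contain a '<station>finallist.dat' index while the output directory must already exist.

# Hypothetical invocation of plotrt() as defined above.
from seispy.plotRT import plotrt

plotrt('data/RF/XHL01',      # rfpath: one directory per station
       enf=5,                # amplify traces 5x before filling
       out_path='figures',   # must already exist (see the exists() check)
       outformat='f',        # 'f' -> PDF, 'g' -> PNG
       xmax=25)              # show 25 s after the P arrival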
from __future__ import unicode_literals from django.apps import AppConfig class ClientesConfig(AppConfig): name = 'Clientes'
carnadaxxx/lotizados
src/Clientes/apps.py
Python
apache-2.0
132
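For this AppConfig to take effect, the app has to be registered in the project's settings. A minimal sketch follows; the surrounding entries are ordinary Django defaults rather than anything taken from this repository.

# Hypothetical excerpt from the project's settings.py: registering the app
# by its AppConfig path makes Django use the class defined above.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'Clientes.apps.ClientesConfig',  # the config defined in Clientes/apps.py
]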
# This file is part of RinohType, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.

from psg.document.dsc import dsc_document
from psg.drawing.box import eps_image, canvas as psg_Canvas


class Document(object):
    extension = '.ps'

    def __init__(self, rinoh_document, title):
        self.rinoh_document = rinoh_document
        self.psg_doc = dsc_document(title)

    def write(self, filename):
        fp = open(filename + self.extension, 'w', encoding='latin-1')
        self.psg_doc.write_to(fp)
        fp.close()


class Page(object):
    def __init__(self, rinoh_page, psg_document, width, height):
        self.rinoh_page = rinoh_page
        self.psg_doc = psg_document
        self.psg_page = psg_document.psg_doc.page((float(width), float(height)))
        self.canvas = PageCanvas(self, self.psg_page.canvas())

    @property
    def document(self):
        return self.rinoh_page.document


class Canvas(object):
    def __init__(self, parent, left, bottom, width, height, clip=False):
        self.parent = parent
        self.psg_canvas = psg_Canvas(parent.psg_canvas,
                                     left, bottom, width, height, clip=clip)

    @property
    def page(self):
        return self.parent.page

    @property
    def document(self):
        return self.page.document

    @property
    def width(self):
        return self.psg_canvas.w()

    @property
    def height(self):
        return self.psg_canvas.h()

    def new(self, left, bottom, width, height, clip=False):
        new_canvas = Canvas(self, left, bottom, width, height, clip)
        return new_canvas

    def append(self, canvas):
        self.psg_canvas.append(canvas.psg_canvas)

    def save_state(self):
        print('gsave', file=self.psg_canvas)

    def restore_state(self):
        print('grestore', file=self.psg_canvas)

    def translate(self, x, y):
        print('{0} {1} translate'.format(x, y), file=self.psg_canvas)

    def scale(self, x, y=None):
        if y is None:
            y = x
        print('{0} {1} scale'.format(x, y), file=self.psg_canvas)

    def move_to(self, x, y):
        print('{0} {1} moveto'.format(x, y), file=self.psg_canvas)

    def line_to(self, x, y):
        print('{0} {1} lineto'.format(x, y), file=self.psg_canvas)

    def new_path(self):
        print('newpath', file=self.psg_canvas)

    def close_path(self):
        print('closepath', file=self.psg_canvas)

    def line_path(self, points):
        self.new_path()
        self.move_to(*points[0])
        for point in points[1:]:
            self.line_to(*point)
        self.close_path()

    def line_width(self, width):
        print('{0} setlinewidth'.format(width), file=self.psg_canvas)

    def color(self, color):
        r, g, b, a = color.rgba
        print('{0} {1} {2} setrgbcolor'.format(r, g, b), file=self.psg_canvas)

    def stroke(self, linewidth, color):
        self.save_state()
        self.color(color)
        self.line_width(float(linewidth))
        print('stroke', file=self.psg_canvas)
        self.restore_state()

    def fill(self, color):
        # Fill the current path with the given color (mirrors stroke()).
        self.save_state()
        self.color(color)
        print('fill', file=self.psg_canvas)
        self.restore_state()

    def _select_font(self, font, size):
        self.font_wrapper = self.psg_canvas.page.register_font(font.psFont,
                                                               True)
        print('/{0} findfont'.format(self.font_wrapper.ps_name()),
              file=self.psg_canvas)
        print('{0} scalefont'.format(size), file=self.psg_canvas)
        print('setfont', file=self.psg_canvas)

    def show_glyphs(self, x, y, font, size, glyphs, x_displacements):
        self.move_to(x, y)
        self._select_font(font, size)
        try:
            ps_repr = self.font_wrapper.postscript_representation(glyphs)
        except AttributeError:
            raise RuntimeError('No font selected for canvas.')
        widths = ' '.join(map(lambda f: '%.2f' % f, x_displacements))
        print('({0}) [{1}] xshow'.format(ps_repr, widths),
              file=self.psg_canvas)

    def place_image(self, image):
        self.psg_canvas.append(image.eps)


class Image(object):
    def __init__(self, canvas, filename, scale=1.0):
        # Load the EPS file onto the given canvas; width and height are the
        # EPS bounding box dimensions multiplied by the requested scale.
        self.scale = scale
        self.eps = eps_image(canvas.psg_canvas, open(filename + '.eps', 'rb'),
                             document_level=True)
        self.height = self.eps.h() * self.scale
        self.width = self.eps.w() * self.scale


class PageCanvas(Canvas):
    def __init__(self, page, psg_canvas):
        self.parent = page
        self.psg_canvas = psg_canvas

    @property
    def page(self):
        return self.parent.rinoh_page
beni55/rinohtype
rinoh/backend/psg.py
Python
agpl-3.0
4,834
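The backend above is driven through Document, Page and the Canvas drawing primitives. The following sketch exercises those calls directly; the Color helper and the None placeholders for rinoh's own document/page objects are assumptions for illustration, since any object with an .rgba tuple satisfies color() and the rinoh objects are never dereferenced here.

# Sketch only: drives the backend classes defined above.
class Color(object):
    def __init__(self, r, g, b, a=1):
        self.rgba = (r, g, b, a)

doc = Document(rinoh_document=None, title='demo')
page = Page(None, doc, width=595, height=842)   # A4 in PostScript points

canvas = page.canvas
canvas.line_path([(100, 100), (300, 100), (300, 200)])
canvas.stroke(2, Color(1, 0, 0))   # 2 pt red outline
canvas.fill(Color(0, 0, 1))        # fill the same path (stroke() wraps itself
                                   # in gsave/grestore, preserving the path)

doc.write('demo')                  # emits demo.ps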
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import paddle import paddle.nn as nn from paddle.fluid.param_attr import ParamAttr from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Linear, MaxPool2D from paddle.utils.download import get_weights_path_from_url __all__ = [] model_urls = { "shufflenet_v2_x0_25": ( "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_25_pretrained.pdparams", "e753404cbd95027759c5f56ecd6c9c4b", ), "shufflenet_v2_x0_33": ( "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_33_pretrained.pdparams", "776e3cf9a4923abdfce789c45b8fe1f2", ), "shufflenet_v2_x0_5": ( "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_5_pretrained.pdparams", "e3649cf531566917e2969487d2bc6b60", ), "shufflenet_v2_x1_0": ( "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_0_pretrained.pdparams", "7821c348ea34e58847c43a08a4ac0bdf", ), "shufflenet_v2_x1_5": ( "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_5_pretrained.pdparams", "93a07fa557ab2d8803550f39e5b6c391", ), "shufflenet_v2_x2_0": ( "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x2_0_pretrained.pdparams", "4ab1f622fd0d341e0f84b4e057797563", ), "shufflenet_v2_swish": ( "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_swish_pretrained.pdparams", "daff38b3df1b3748fccbb13cfdf02519", ), } def channel_shuffle(x, groups): batch_size, num_channels, height, width = x.shape[0:4] channels_per_group = num_channels // groups # reshape x = paddle.reshape( x, shape=[batch_size, groups, channels_per_group, height, width]) # transpose x = paddle.transpose(x, perm=[0, 2, 1, 3, 4]) # flatten x = paddle.reshape(x, shape=[batch_size, num_channels, height, width]) return x class ConvBNLayer(nn.Layer): def __init__(self, in_channels, out_channels, kernel_size, stride, padding, groups=1, act=None): super(ConvBNLayer, self).__init__() self._conv = Conv2D( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, weight_attr=ParamAttr(initializer=nn.initializer.KaimingNormal()), bias_attr=False, ) self._batch_norm = BatchNorm(out_channels, act=act) def forward(self, inputs): x = self._conv(inputs) x = self._batch_norm(x) return x class InvertedResidual(nn.Layer): def __init__(self, in_channels, out_channels, stride, act="relu"): super(InvertedResidual, self).__init__() self._conv_pw = ConvBNLayer( in_channels=in_channels // 2, out_channels=out_channels // 2, kernel_size=1, stride=1, padding=0, groups=1, act=act) self._conv_dw = ConvBNLayer( in_channels=out_channels // 2, out_channels=out_channels // 2, kernel_size=3, stride=stride, padding=1, groups=out_channels // 2, act=None) self._conv_linear = ConvBNLayer( in_channels=out_channels // 2, 
            out_channels=out_channels // 2,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            act=act)

    def forward(self, inputs):
        x1, x2 = paddle.split(
            inputs,
            num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2],
            axis=1)
        x2 = self._conv_pw(x2)
        x2 = self._conv_dw(x2)
        x2 = self._conv_linear(x2)
        out = paddle.concat([x1, x2], axis=1)
        return channel_shuffle(out, 2)


class InvertedResidualDS(nn.Layer):
    def __init__(self, in_channels, out_channels, stride, act="relu"):
        super(InvertedResidualDS, self).__init__()

        # branch1
        self._conv_dw_1 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            groups=in_channels,
            act=None)
        self._conv_linear_1 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels // 2,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            act=act)
        # branch2
        self._conv_pw_2 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels // 2,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            act=act)
        self._conv_dw_2 = ConvBNLayer(
            in_channels=out_channels // 2,
            out_channels=out_channels // 2,
            kernel_size=3,
            stride=stride,
            padding=1,
            groups=out_channels // 2,
            act=None)
        self._conv_linear_2 = ConvBNLayer(
            in_channels=out_channels // 2,
            out_channels=out_channels // 2,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            act=act)

    def forward(self, inputs):
        x1 = self._conv_dw_1(inputs)
        x1 = self._conv_linear_1(x1)
        x2 = self._conv_pw_2(inputs)
        x2 = self._conv_dw_2(x2)
        x2 = self._conv_linear_2(x2)
        out = paddle.concat([x1, x2], axis=1)
        return channel_shuffle(out, 2)


class ShuffleNetV2(nn.Layer):
    """ShuffleNetV2 model from
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" <https://arxiv.org/pdf/1807.11164.pdf>`_

    Args:
        scale (float, optional) - scale of output channels. Default: 1.0.
        act (str, optional) - activation function of neural network. Default: "relu".
        num_classes (int, optional): output dim of last fc layer. If num_classes <=0,
            last fc layer will not be defined. Default: 1000.
        with_pool (bool, optional): use pool before the last fc layer or not. Default: True.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import ShuffleNetV2

            shufflenet_v2_swish = ShuffleNetV2(scale=1.0, act="swish")

            x = paddle.rand([1, 3, 224, 224])
            out = shufflenet_v2_swish(x)

            print(out.shape)
    """

    def __init__(self, scale=1.0, act="relu", num_classes=1000, with_pool=True):
        super(ShuffleNetV2, self).__init__()
        self.scale = scale
        self.num_classes = num_classes
        self.with_pool = with_pool
        stage_repeats = [4, 8, 4]

        if scale == 0.25:
            stage_out_channels = [-1, 24, 24, 48, 96, 512]
        elif scale == 0.33:
            stage_out_channels = [-1, 24, 32, 64, 128, 512]
        elif scale == 0.5:
            stage_out_channels = [-1, 24, 48, 96, 192, 1024]
        elif scale == 1.0:
            stage_out_channels = [-1, 24, 116, 232, 464, 1024]
        elif scale == 1.5:
            stage_out_channels = [-1, 24, 176, 352, 704, 1024]
        elif scale == 2.0:
            stage_out_channels = [-1, 24, 224, 488, 976, 2048]
        else:
            raise NotImplementedError("This scale size:[" + str(scale) +
                                      "] is not implemented!")
        # 1. conv1
        self._conv1 = ConvBNLayer(
            in_channels=3,
            out_channels=stage_out_channels[1],
            kernel_size=3,
            stride=2,
            padding=1,
            act=act)
        self._max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1)

        # 2. bottleneck sequences
        self._block_list = []
        for stage_id, num_repeat in enumerate(stage_repeats):
            for i in range(num_repeat):
                if i == 0:
                    block = self.add_sublayer(
                        sublayer=InvertedResidualDS(
                            in_channels=stage_out_channels[stage_id + 1],
                            out_channels=stage_out_channels[stage_id + 2],
                            stride=2,
                            act=act),
                        name=str(stage_id + 2) + "_" + str(i + 1))
                else:
                    block = self.add_sublayer(
                        sublayer=InvertedResidual(
                            in_channels=stage_out_channels[stage_id + 2],
                            out_channels=stage_out_channels[stage_id + 2],
                            stride=1,
                            act=act),
                        name=str(stage_id + 2) + "_" + str(i + 1))
                self._block_list.append(block)

        # 3. last_conv
        self._last_conv = ConvBNLayer(
            in_channels=stage_out_channels[-2],
            out_channels=stage_out_channels[-1],
            kernel_size=1,
            stride=1,
            padding=0,
            act=act)
        # 4. pool
        if with_pool:
            self._pool2d_avg = AdaptiveAvgPool2D(1)

        # 5. fc
        if num_classes > 0:
            self._out_c = stage_out_channels[-1]
            self._fc = Linear(stage_out_channels[-1], num_classes)

    def forward(self, inputs):
        x = self._conv1(inputs)
        x = self._max_pool(x)
        for inv in self._block_list:
            x = inv(x)
        x = self._last_conv(x)

        if self.with_pool:
            x = self._pool2d_avg(x)

        if self.num_classes > 0:
            x = paddle.flatten(x, start_axis=1, stop_axis=-1)
            x = self._fc(x)
        return x


def _shufflenet_v2(arch, pretrained=False, **kwargs):
    model = ShuffleNetV2(**kwargs)
    if pretrained:
        assert (
            arch in model_urls
        ), "{} model does not have a pretrained model now, you should set pretrained=False".format(
            arch)
        weight_path = get_weights_path_from_url(model_urls[arch][0],
                                                model_urls[arch][1])

        param = paddle.load(weight_path)
        model.set_dict(param)

    return model


def shufflenet_v2_x0_25(pretrained=False, **kwargs):
    """ShuffleNetV2 with 0.25x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" <https://arxiv.org/pdf/1807.11164.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import shufflenet_v2_x0_25

            # build model
            model = shufflenet_v2_x0_25()

            # build model and load imagenet pretrained weight
            # model = shufflenet_v2_x0_25(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
    """
    return _shufflenet_v2(
        "shufflenet_v2_x0_25", scale=0.25, pretrained=pretrained, **kwargs)


def shufflenet_v2_x0_33(pretrained=False, **kwargs):
    """ShuffleNetV2 with 0.33x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" <https://arxiv.org/pdf/1807.11164.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import shufflenet_v2_x0_33

            # build model
            model = shufflenet_v2_x0_33()

            # build model and load imagenet pretrained weight
            # model = shufflenet_v2_x0_33(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
    """
    return _shufflenet_v2(
        "shufflenet_v2_x0_33", scale=0.33, pretrained=pretrained, **kwargs)


def shufflenet_v2_x0_5(pretrained=False, **kwargs):
    """ShuffleNetV2 with 0.5x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" <https://arxiv.org/pdf/1807.11164.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python
            import paddle
            from paddle.vision.models import shufflenet_v2_x0_5

            # build model
            model = shufflenet_v2_x0_5()

            # build model and load imagenet pretrained weight
            # model = shufflenet_v2_x0_5(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
    """
    return _shufflenet_v2(
        "shufflenet_v2_x0_5", scale=0.5, pretrained=pretrained, **kwargs)


def shufflenet_v2_x1_0(pretrained=False, **kwargs):
    """ShuffleNetV2 with 1.0x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" <https://arxiv.org/pdf/1807.11164.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import shufflenet_v2_x1_0

            # build model
            model = shufflenet_v2_x1_0()

            # build model and load imagenet pretrained weight
            # model = shufflenet_v2_x1_0(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
    """
    return _shufflenet_v2(
        "shufflenet_v2_x1_0", scale=1.0, pretrained=pretrained, **kwargs)


def shufflenet_v2_x1_5(pretrained=False, **kwargs):
    """ShuffleNetV2 with 1.5x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" <https://arxiv.org/pdf/1807.11164.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import shufflenet_v2_x1_5

            # build model
            model = shufflenet_v2_x1_5()

            # build model and load imagenet pretrained weight
            # model = shufflenet_v2_x1_5(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
    """
    return _shufflenet_v2(
        "shufflenet_v2_x1_5", scale=1.5, pretrained=pretrained, **kwargs)


def shufflenet_v2_x2_0(pretrained=False, **kwargs):
    """ShuffleNetV2 with 2.0x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" <https://arxiv.org/pdf/1807.11164.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import shufflenet_v2_x2_0

            # build model
            model = shufflenet_v2_x2_0()

            # build model and load imagenet pretrained weight
            # model = shufflenet_v2_x2_0(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
    """
    return _shufflenet_v2(
        "shufflenet_v2_x2_0", scale=2.0, pretrained=pretrained, **kwargs)


def shufflenet_v2_swish(pretrained=False, **kwargs):
    """ShuffleNetV2 with 1.0x output channels and swish activation function, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" <https://arxiv.org/pdf/1807.11164.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet. Default: False.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import shufflenet_v2_swish

            # build model
            model = shufflenet_v2_swish()

            # build model and load imagenet pretrained weight
            # model = shufflenet_v2_swish(pretrained=True)

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
    """
    return _shufflenet_v2(
        "shufflenet_v2_swish",
        scale=1.0,
        act="swish",
        pretrained=pretrained,
        **kwargs)
luotao1/Paddle
python/paddle/vision/models/shufflenetv2.py
Python
apache-2.0
17,363
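channel_shuffle() above implements the interleaving with a reshape, transpose and flatten. A quick numeric check of that round trip, assuming the module above is importable as paddle.vision.models.shufflenetv2 (an assumption about packaging): with groups=2 and C channels, output channel i comes from input channel (i % 2) * (C // 2) + i // 2.

import paddle
from paddle.vision.models.shufflenetv2 import channel_shuffle  # assumed import path

x = paddle.arange(8, dtype='float32').reshape([1, 8, 1, 1])   # channels 0..7
y = channel_shuffle(x, groups=2)
print(y.flatten().numpy())   # [0. 4. 1. 5. 2. 6. 3. 7.]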
from __future__ import print_function # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import gzip import os import random import serial from serial.serialutil import SerialTimeoutException from StringIO import StringIO import sys import time import zipfile def guess_port(): port = os.environ.get('UPURS_PORT') if port: return port for pattern in "/dev/ttyACM? /dev/ttyUSB? /dev/tty.usbserial* /dev/tty.usbmodem* /dev/tty.wchusbserial*".split(): matches = glob.glob(pattern) if matches: return matches[0] print("Opening port") USE_TIMEOUT=0 ser = serial.Serial(guess_port(), timeout=0, write_timeout=0.5 if USE_TIMEOUT else None) decompress = False fn = None for arg in sys.argv[1:]: if arg == '-d': decompress = True else: fn = arg if not decompress: data = open(fn).read() else: data = None # try loading a .uef out of a .zip try: zf = zipfile.ZipFile(fn) for f in zf.namelist(): if f.endswith(".uef"): print("found %s in zip" % f) data = zf.read(f) print("read %d bytes from %s inside %s" % (len(data), f, fn)) break except zipfile.BadZipfile: print("not a zip file") if data is None: # not a zip or can't find a .uef in there data = open(fn).read() print("read %d bytes from %s" % (len(data), fn)) # try un-gzipping it try: data = gzip.GzipFile(fileobj=StringIO(data)).read() print("after gunzipping: %d bytes" % len(data)) except IOError: print("not gzipped") print("Sending %s to port and verifying that it comes back" % fn) n_out = n_in = 0 received = [] n_retries = 0 print("Writing %d (%x) bytes" % (len(data), len(data))) for c in data: while True: v = ord(c) print("%02x %c" % (v, c if 32 < v < 127 else '.')) try: n = ser.write(c) except SerialTimeoutException: n = 0 print(n) if not USE_TIMEOUT: break # try receiving r = ser.read(1000) if r: print("RECEIVED", repr(r)) received.append(r) if n: break # next char time.sleep(0.01) print("RETRY", end=' ') n_retries += 1 print("Waiting for final serial loopback") start = time.time() while (time.time() - start) < 0.5: r = ser.read() if not r: time.sleep(0.1) continue # we got something, so reset the timeout start = time.time() print(repr(r)) received.append(r) print("ALL SENT") received = ''.join(received) print("This is what we received:") print(repr(received)) n = len(received) print("%d (%x) bytes (%d missing). %d retries." % (n, n, len(data) - n, n_retries))
google/myelin-acorn-electron-hardware
upurs_usb_port/upload_to_upurs.py
Python
apache-2.0
3,335
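The zip-then-gzip probing above is the interesting part of the loader: try to pull a .uef out of a zip, then transparently gunzip, else keep the raw bytes. A standalone Python 3 sketch of the same cascade (the function name is mine, not from the script):

import gzip
import io
import zipfile

def load_uef(fn):
    data = open(fn, 'rb').read()
    try:
        zf = zipfile.ZipFile(fn)
        for name in zf.namelist():
            if name.endswith('.uef'):
                data = zf.read(name)
                break
    except zipfile.BadZipfile:
        pass  # not a zip; keep the raw bytes
    try:
        # .uef files are usually gzip-compressed; fall through if not
        return gzip.GzipFile(fileobj=io.BytesIO(data)).read()
    except IOError:
        return data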
import numpy as np from scipy.integrate import odeint from bokeh.plotting import * def streamlines(x, y, u, v, density=1): '''Returns streamlines of a vector flow. * x and y are 1d arrays defining an *evenly spaced* grid. * u and v are 2d arrays (shape [y,x]) giving velocities. * density controls the closeness of the streamlines. For different densities in each direction, use a tuple or list [densityx, densityy]. ''' ## Set up some constants - size of the grid used. NGX = len(x) NGY = len(y) ## Constants used to convert between grid index coords and user coords. DX = x[1]-x[0] DY = y[1]-y[0] XOFF = x[0] YOFF = y[0] ## Now rescale velocity onto axes-coordinates u = u / (x[-1]-x[0]) v = v / (y[-1]-y[0]) speed = np.sqrt(u*u+v*v) ## s (path length) will now be in axes-coordinates, but we must ## rescale u for integrations. u *= NGX v *= NGY ## Now u and v in grid-coordinates. NBX = int(30*density) NBY = int(30*density) blank = np.zeros((NBY,NBX)) bx_spacing = NGX/float(NBX-1) by_spacing = NGY/float(NBY-1) def blank_pos(xi, yi): return int((xi / bx_spacing) + 0.5), \ int((yi / by_spacing) + 0.5) def value_at(a, xi, yi): if type(xi) == np.ndarray: x = xi.astype(np.int) y = yi.astype(np.int) else: x = np.int(xi) y = np.int(yi) a00 = a[y,x] a01 = a[y,x+1] a10 = a[y+1,x] a11 = a[y+1,x+1] xt = xi - x yt = yi - y a0 = a00*(1-xt) + a01*xt a1 = a10*(1-xt) + a11*xt return a0*(1-yt) + a1*yt def rk4_integrate(x0, y0): ## This function does RK4 forward and back trajectories from ## the initial conditions, with the odd 'blank array' ## termination conditions. TODO tidy the integration loops. def f(xi, yi): dt_ds = 1./value_at(speed, xi, yi) ui = value_at(u, xi, yi) vi = value_at(v, xi, yi) return ui*dt_ds, vi*dt_ds def g(xi, yi): dt_ds = 1./value_at(speed, xi, yi) ui = value_at(u, xi, yi) vi = value_at(v, xi, yi) return -ui*dt_ds, -vi*dt_ds check = lambda xi, yi: xi>=0 and xi<NGX-1 and yi>=0 and yi<NGY-1 bx_changes = [] by_changes = [] ## Integrator function def rk4(x0, y0, f): ds = 0.01 #min(1./NGX, 1./NGY, 0.01) stotal = 0 xi = x0 yi = y0 xb, yb = blank_pos(xi, yi) xf_traj = [] yf_traj = [] while check(xi, yi): # Time step. First save the point. xf_traj.append(xi) yf_traj.append(yi) # Next, advance one using RK4 try: k1x, k1y = f(xi, yi) k2x, k2y = f(xi + .5*ds*k1x, yi + .5*ds*k1y) k3x, k3y = f(xi + .5*ds*k2x, yi + .5*ds*k2y) k4x, k4y = f(xi + ds*k3x, yi + ds*k3y) except IndexError: # Out of the domain on one of the intermediate steps break xi += ds*(k1x+2*k2x+2*k3x+k4x) / 6. yi += ds*(k1y+2*k2y+2*k3y+k4y) / 6. # Final position might be out of the domain if not check(xi, yi): break stotal += ds # Next, if s gets to thres, check blank. new_xb, new_yb = blank_pos(xi, yi) if new_xb != xb or new_yb != yb: # New square, so check and colour. Quit if required. if blank[new_yb,new_xb] == 0: blank[new_yb,new_xb] = 1 bx_changes.append(new_xb) by_changes.append(new_yb) xb = new_xb yb = new_yb else: break if stotal > 2: break return stotal, xf_traj, yf_traj integrator = rk4 sf, xf_traj, yf_traj = integrator(x0, y0, f) sb, xb_traj, yb_traj = integrator(x0, y0, g) stotal = sf + sb x_traj = xb_traj[::-1] + xf_traj[1:] y_traj = yb_traj[::-1] + yf_traj[1:] ## Tests to check length of traj. Remember, s in units of axes. if len(x_traj) < 1: return None if stotal > .2: initxb, inityb = blank_pos(x0, y0) blank[inityb, initxb] = 1 return x_traj, y_traj else: for xb, yb in zip(bx_changes, by_changes): blank[yb, xb] = 0 return None ## A quick function for integrating trajectories if blank==0. 
trajectories = [] def traj(xb, yb): if xb < 0 or xb >= NBX or yb < 0 or yb >= NBY: return if blank[yb, xb] == 0: t = rk4_integrate(xb*bx_spacing, yb*by_spacing) if t != None: trajectories.append(t) ## Now we build up the trajectory set. I've found it best to look ## for blank==0 along the edges first, and work inwards. for indent in range((max(NBX,NBY))//2): for xi in range(max(NBX,NBY)-2*indent): traj(xi+indent, indent) traj(xi+indent, NBY-1-indent) traj(indent, xi+indent) traj(NBX-1-indent, xi+indent) xs = [np.array(t[0])*DX+XOFF for t in trajectories] ys = [np.array(t[1])*DY+YOFF for t in trajectories] return xs, ys xx = np.linspace(-3, 3, 100) yy = np.linspace(-3, 3, 100) Y, X = np.meshgrid(xx, yy) U = -1 - X**2 + Y V = 1 + X - Y**2 speed = np.sqrt(U*U + V*V) theta = np.arctan(V/U) x0 = X[::2, ::2].flatten() y0 = Y[::2, ::2].flatten() length = speed[::2, ::2].flatten()/40 angle = theta[::2, ::2].flatten() x1 = x0 + length * np.cos(angle) y1 = y0 + length * np.sin(angle) xs, ys = streamlines(xx, yy, U.T, V.T, density=2) cm = np.array(["#C7E9B4", "#7FCDBB", "#41B6C4", "#1D91C0", "#225EA8", "#0C2C84"]) ix = ((length-length.min())/(length.max()-length.min())*5).astype('int') colors = cm[ix] output_cloud("vector") figure(tools="pan,wheel_zoom,box_zoom,reset,previewsave") segment(x0, y0, x1, y1, line_color=colors, line_width=2, ) multi_line(xs, ys, line_color="#ee6666", line_width=2, line_alpha=0.8, name="vector example" ) show() # open a browser
sahat/bokeh
examples/plotting/cloud/vector.py
Python
bsd-3-clause
6,505
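rk4() above advances each streamline with the classic fourth-order Runge-Kutta weighting (k1 + 2*k2 + 2*k3 + k4) / 6. A self-contained sketch of that step on a toy rotational field, where trajectories should stay on a circle:

def rk4_step(f, x, y, ds):
    # One RK4 step for a 2D field f(x, y) -> (dx/ds, dy/ds).
    k1x, k1y = f(x, y)
    k2x, k2y = f(x + 0.5 * ds * k1x, y + 0.5 * ds * k1y)
    k3x, k3y = f(x + 0.5 * ds * k2x, y + 0.5 * ds * k2y)
    k4x, k4y = f(x + ds * k3x, y + ds * k3y)
    return (x + ds * (k1x + 2 * k2x + 2 * k3x + k4x) / 6.0,
            y + ds * (k1y + 2 * k2y + 2 * k3y + k4y) / 6.0)

circle = lambda x, y: (-y, x)   # pure rotation: |(x, y)| is conserved
x, y = 1.0, 0.0
for _ in range(628):            # roughly one revolution at ds=0.01
    x, y = rk4_step(circle, x, y, 0.01)
print(x * x + y * y)            # very close to 1.0; RK4 barely drifts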
# Language: Python
# Author: heckerman100
print("Hello World")
mojtabatmj/Hacktoberfest2017
hello-scripts/Hello_world-heckerman100.py
Python
mit
65
"""The test for the threshold sensor platform.""" from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, STATE_UNKNOWN, TEMP_CELSIUS from homeassistant.setup import async_setup_component async def test_sensor_upper(hass): """Test if source is above threshold.""" config = { "binary_sensor": { "platform": "threshold", "upper": "15", "entity_id": "sensor.test_monitored", } } assert await async_setup_component(hass, "binary_sensor", config) await hass.async_block_till_done() hass.states.async_set( "sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS} ) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("entity_id") == "sensor.test_monitored" assert state.attributes.get("sensor_value") == 16 assert state.attributes.get("position") == "above" assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"]) assert state.attributes.get("hysteresis") == 0.0 assert state.attributes.get("type") == "upper" assert state.state == "on" hass.states.async_set("sensor.test_monitored", 14) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.state == "off" hass.states.async_set("sensor.test_monitored", 15) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.state == "off" async def test_sensor_lower(hass): """Test if source is below threshold.""" config = { "binary_sensor": { "platform": "threshold", "lower": "15", "entity_id": "sensor.test_monitored", } } assert await async_setup_component(hass, "binary_sensor", config) await hass.async_block_till_done() hass.states.async_set("sensor.test_monitored", 16) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "above" assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"]) assert state.attributes.get("hysteresis") == 0.0 assert state.attributes.get("type") == "lower" assert state.state == "off" hass.states.async_set("sensor.test_monitored", 14) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.state == "on" async def test_sensor_hysteresis(hass): """Test if source is above threshold using hysteresis.""" config = { "binary_sensor": { "platform": "threshold", "upper": "15", "hysteresis": "2.5", "entity_id": "sensor.test_monitored", } } assert await async_setup_component(hass, "binary_sensor", config) await hass.async_block_till_done() hass.states.async_set("sensor.test_monitored", 20) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "above" assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"]) assert state.attributes.get("hysteresis") == 2.5 assert state.attributes.get("type") == "upper" assert state.state == "on" hass.states.async_set("sensor.test_monitored", 13) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.state == "on" hass.states.async_set("sensor.test_monitored", 12) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.state == "off" hass.states.async_set("sensor.test_monitored", 17) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.state == "off" hass.states.async_set("sensor.test_monitored", 18) await hass.async_block_till_done() state = 
hass.states.get("binary_sensor.threshold") assert state.state == "on" async def test_sensor_in_range_no_hysteresis(hass): """Test if source is within the range.""" config = { "binary_sensor": { "platform": "threshold", "lower": "10", "upper": "20", "entity_id": "sensor.test_monitored", } } assert await async_setup_component(hass, "binary_sensor", config) await hass.async_block_till_done() hass.states.async_set( "sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS} ) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("entity_id") == "sensor.test_monitored" assert state.attributes.get("sensor_value") == 16 assert state.attributes.get("position") == "in_range" assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"]) assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"]) assert state.attributes.get("hysteresis") == 0.0 assert state.attributes.get("type") == "range" assert state.state == "on" hass.states.async_set("sensor.test_monitored", 9) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "below" assert state.state == "off" hass.states.async_set("sensor.test_monitored", 21) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "above" assert state.state == "off" async def test_sensor_in_range_with_hysteresis(hass): """Test if source is within the range.""" config = { "binary_sensor": { "platform": "threshold", "lower": "10", "upper": "20", "hysteresis": "2", "entity_id": "sensor.test_monitored", } } assert await async_setup_component(hass, "binary_sensor", config) await hass.async_block_till_done() hass.states.async_set( "sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS} ) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("entity_id") == "sensor.test_monitored" assert state.attributes.get("sensor_value") == 16 assert state.attributes.get("position") == "in_range" assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"]) assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"]) assert state.attributes.get("hysteresis") == float( config["binary_sensor"]["hysteresis"] ) assert state.attributes.get("type") == "range" assert state.state == "on" hass.states.async_set("sensor.test_monitored", 8) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "in_range" assert state.state == "on" hass.states.async_set("sensor.test_monitored", 7) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "below" assert state.state == "off" hass.states.async_set("sensor.test_monitored", 12) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "below" assert state.state == "off" hass.states.async_set("sensor.test_monitored", 13) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "in_range" assert state.state == "on" hass.states.async_set("sensor.test_monitored", 22) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "in_range" assert 
state.state == "on" hass.states.async_set("sensor.test_monitored", 23) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "above" assert state.state == "off" hass.states.async_set("sensor.test_monitored", 18) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "above" assert state.state == "off" hass.states.async_set("sensor.test_monitored", 17) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "in_range" assert state.state == "on" async def test_sensor_in_range_unknown_state(hass): """Test if source is within the range.""" config = { "binary_sensor": { "platform": "threshold", "lower": "10", "upper": "20", "entity_id": "sensor.test_monitored", } } assert await async_setup_component(hass, "binary_sensor", config) await hass.async_block_till_done() hass.states.async_set( "sensor.test_monitored", 16, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS} ) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("entity_id") == "sensor.test_monitored" assert state.attributes.get("sensor_value") == 16 assert state.attributes.get("position") == "in_range" assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"]) assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"]) assert state.attributes.get("hysteresis") == 0.0 assert state.attributes.get("type") == "range" assert state.state == "on" hass.states.async_set("sensor.test_monitored", STATE_UNKNOWN) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("position") == "unknown" assert state.state == "off" async def test_sensor_lower_zero_threshold(hass): """Test if a lower threshold of zero is set.""" config = { "binary_sensor": { "platform": "threshold", "lower": "0", "entity_id": "sensor.test_monitored", } } assert await async_setup_component(hass, "binary_sensor", config) await hass.async_block_till_done() hass.states.async_set("sensor.test_monitored", 16) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("type") == "lower" assert state.attributes.get("lower") == float(config["binary_sensor"]["lower"]) assert state.state == "off" hass.states.async_set("sensor.test_monitored", -3) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.state == "on" async def test_sensor_upper_zero_threshold(hass): """Test if an upper threshold of zero is set.""" config = { "binary_sensor": { "platform": "threshold", "upper": "0", "entity_id": "sensor.test_monitored", } } assert await async_setup_component(hass, "binary_sensor", config) await hass.async_block_till_done() hass.states.async_set("sensor.test_monitored", -10) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.attributes.get("type") == "upper" assert state.attributes.get("upper") == float(config["binary_sensor"]["upper"]) assert state.state == "off" hass.states.async_set("sensor.test_monitored", 2) await hass.async_block_till_done() state = hass.states.get("binary_sensor.threshold") assert state.state == "on"
kennedyshead/home-assistant
tests/components/threshold/test_binary_sensor.py
Python
apache-2.0
11,914
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import datetime
import mock
from decimal import Decimal
from os import path, unlink

from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db.models import Max
from django.test import TestCase

from . import utils
from . import forms
from . import models
from ..conference.models import Conference


def escape_redirect(s):
    return s.replace('/', '%2F')


def ctype(model):
    return ContentType.objects.get_for_model(model)


# used to mock the redis connection
def get_next_invoice_number():
    def wrapper(sequence_name=None):
        last = models.Purchase.objects.aggregate(last=Max('invoice_number'))['last']
        if last is None:
            return 1
        else:
            return last + 1
    return wrapper


class ViewTests(TestCase):

    def test_purchase_required_login(self):
        url = reverse('attendees_purchase')
        self.assertRedirects(
            self.client.get(url, follow=True),
            '/en/accounts/login/?next=' + escape_redirect(url))

    def test_purchase_confirm_required_login(self):
        url = reverse('attendees_purchase_confirm')
        self.assertRedirects(
            self.client.get(url, follow=True),
            '/en/accounts/login/?next=' + escape_redirect(url))

    def test_purchase_names_required_login(self):
        url = reverse('attendees_purchase_names')
        self.assertRedirects(
            self.client.get(url, follow=True),
            '/en/accounts/login/?next=' + escape_redirect(url))


class PurchaseViewTests(TestCase):

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            'user', 'user@user.com', 'user')
        self.user.first_name = 'Firstname'
        self.user.last_name = 'Lastname'
        self.user.save()

    def tearDown(self):
        self.user.delete()

    def test_purchase_form_prefilled(self):
        """The purchase form should be prefilled with the current user's
        firstname, lastname and e-mail address."""
        self.client.login(username='user', password='user')
        resp = self.client.get(reverse('attendees_purchase'))
        initial = resp.context['form'].initial
        self.assertEqual('Firstname', initial['first_name'])
        self.assertEqual('Lastname', initial['last_name'])
        self.assertEqual('user@user.com', initial['email'])


class UtilsTests(TestCase):

    def test_rounding(self):
        self.assertEqual(Decimal('1.25'), utils.round_money_value(Decimal('1.245')))
        self.assertEqual(Decimal('1.24'), utils.round_money_value(Decimal('1.244')))
        self.assertEqual(Decimal('1.25'), utils.round_money_value(1.245))


class TicketQuantityFormTests(TestCase):

    def setUp(self):
        now = datetime.datetime.now()
        self.voucher_type = models.VoucherType()
        self.voucher_type.save()
        self.ticket_type_with_voucher = models.TicketType(
            name='with voucher', is_active=True, date_valid_from=now,
            date_valid_to=now + datetime.timedelta(days=365),
            vouchertype_needed=self.voucher_type,
            content_type=ctype(models.VenueTicket)
        )
        self.ticket_type_without_limit = models.TicketType(
            name='without limit', is_active=True, date_valid_from=now,
            date_valid_to=now + datetime.timedelta(days=365),
            max_purchases=0,
            content_type=ctype(models.VenueTicket)
        )
        self.ticket_type_with_limit = models.TicketType(
            name='with limit', is_active=True, date_valid_from=now,
            date_valid_to=now + datetime.timedelta(days=365),
            max_purchases=2,
            content_type=ctype(models.VenueTicket)
        )
        self.ticket_type_without_limit.save()
        self.ticket_type_with_limit.save()
        self.ticket_type_with_voucher.save()

    def tearDown(self):
        self.voucher_type.delete()
        self.ticket_type_without_limit.delete()
        self.ticket_type_with_limit.delete()
        self.ticket_type_with_voucher.delete()

    def test_max_amount_with_voucher(self):
        """
        A ticket that requires a voucher can only have the qty of 1.
        """
        qty_key = 'tq-{0}-quantity'.format(self.ticket_type_with_voucher.pk)
        form = forms.TicketQuantityForm(
            self.ticket_type_with_voucher, data={qty_key: 2})
        self.assertFalse(form.is_valid())

    def test_max_amount_without_limit(self):
        """
        A ticket type without a purchase limit should accept any quantity.
        """
        qty_key = 'tq-{0}-quantity'.format(self.ticket_type_without_limit.pk)
        form = forms.TicketQuantityForm(
            self.ticket_type_without_limit, data={qty_key: 2})
        self.assertTrue(form.is_valid())

    def test_max_amount_exceeded_with_limit(self):
        """
        A ticket that has a limit set should have this value be enforced.
        """
        form = forms.TicketQuantityForm(
            self.ticket_type_with_limit,
            data={'tq-{0}-quantity'.format(self.ticket_type_with_limit.pk): 3})
        self.assertFalse(form.is_valid())

    def test_max_amount_valid_with_limit(self):
        """
        If the requested amount doesn't exceed the maximum amount then the
        form is valid.
        """
        form = forms.TicketQuantityForm(
            self.ticket_type_with_limit,
            data={'tq-{0}-quantity'.format(self.ticket_type_with_limit.pk): 2})
        self.assertTrue(form.is_valid())


class TicketVoucherFormTests(TestCase):

    def setUp(self):
        now = datetime.datetime.now()
        self.user = get_user_model().objects.create_user(
            'test_user', 'test@test.com', 'test_password')
        self.voucher_type = models.VoucherType(name='type1')
        self.voucher_type.save()
        self.voucher_type2 = models.VoucherType(name='type2')
        self.voucher_type2.save()
        self.voucher = models.Voucher(
            type=self.voucher_type,
            date_valid=now + datetime.timedelta(days=1))
        self.voucher.save()
        self.voucher2 = models.Voucher(
            type=self.voucher_type2,
            date_valid=now + datetime.timedelta(days=1))
        self.voucher2.save()
        self.voucher = models.Voucher.objects.get(pk=self.voucher.pk)
        self.voucher2 = models.Voucher.objects.get(pk=self.voucher2.pk)
        self.purchase = models.Purchase(
            user=self.user, first_name='First name', last_name='Last name',
            street='street 123', zip_code='1234', city='city',
            country='country', email='test@test.com')
        self.purchase.save()
        self.ticket_type = models.TicketType(
            name='test', date_valid_from=now,
            date_valid_to=now + datetime.timedelta(days=1),
            vouchertype_needed=self.voucher_type,
            content_type=ctype(models.VenueTicket))
        self.ticket_type.save()
        self.ticket = models.VenueTicket(
            purchase=self.purchase, ticket_type=self.ticket_type)
        self.ticket.save()

    def tearDown(self):
        self.ticket_type.delete()
        self.user.delete()

    def test_code_validation(self):
        form = forms.TicketVoucherForm(instance=self.ticket, data={
            'tv-{0}-code'.format(self.ticket.pk): 123
        })
        self.assertFalse(form.is_valid())
        form = forms.TicketVoucherForm(instance=self.ticket, data={
            'tv-{0}-code'.format(self.ticket.pk): self.voucher.code
        })
        self.assertTrue(form.is_valid())
        form = forms.TicketVoucherForm(instance=self.ticket, data={
            'tv-{0}-code'.format(self.ticket.pk): self.voucher2.code
        })
        self.assertFalse(form.is_valid())


class TicketAssignmentFormTests(TestCase):

    def setUp(self):
        self.user1 = get_user_model().objects.create_user(
            'test_user1', 'test1@test.com', 'test_password')
        self.user2 = get_user_model().objects.create_user(
            'test_user2', 'test2@test.com', 'test_password')

    def tearDown(self):
        self.user1.delete()
        self.user2.delete()

    def test_username_required(self):
        form = forms.TicketAssignmentForm()
        self.assertFalse(form.is_valid())

    def test_non_existing_username(self):
        form = forms.TicketAssignmentForm(
            current_user=self.user1,
            data={'username': 'i-dont-exist@test.com'})
        self.assertFalse(form.is_valid())

    def test_username_not_of_current_user(self):
        form = forms.TicketAssignmentForm(
            current_user=self.user1,
            data={'username': self.user2.username})
        self.assertTrue(form.is_valid())

    def test_username_of_current_user(self):
        form = forms.TicketAssignmentForm(
            current_user=self.user1,
            data={'username': self.user1.username})
        self.assertTrue(form.is_valid())


class PurchaseOverviewFormTests(TestCase):

    def test_creditcard_unavailable_for_zero_total(self):
        form = forms.PurchaseOverviewForm(
            purchase=models.Purchase(payment_total=0.0))
        available_methods = [c[0] for c in form.fields['payment_method'].choices]
        self.assertNotIn('creditcard', available_methods)

    def test_creditcard_available_for_gtzero_total(self):
        form = forms.PurchaseOverviewForm(
            purchase=models.Purchase(payment_total=0.01))
        available_methods = [c[0] for c in form.fields['payment_method'].choices]
        self.assertIn('creditcard', available_methods)


class PurchaseProcessTest(TestCase):

    def setUp(self):
        self.now = datetime.datetime.now()
        self.purchase_start = self.now - datetime.timedelta(days=5)
        self.purchase_end = self.now + datetime.timedelta(days=25)
        self.ct_venueticket = ctype(models.VenueTicket)
        self.ct_simcardticket = ctype(models.SIMCardTicket)
        self.ct_supportticket = ctype(models.SupportTicket)
        self.user = get_user_model().objects.create_user(
            username='user', email='user@user.com', password='user')
        self.client.login(username='user', password='user')
        # setup the conference
        self.conference = Conference.objects.create(title='TestConf')
        # setup the voucher types and vouchers
        self.vt_student = models.VoucherType.objects.create(
            conference=self.conference, name='VT:Student')
        self.vt_fin_aid = models.VoucherType.objects.create(
            conference=self.conference, name='VT:FinAid')
        self.v_student = models.Voucher.objects.create(
            remarks='V:Student', date_valid=self.purchase_end,
            type=self.vt_student)
        self.v_fin_aid = models.Voucher.objects.create(
            remarks='V:FinAid', date_valid=self.purchase_end,
            type=self.vt_fin_aid)
        # setup the ticket types
        self.tt_conf_student = models.TicketType.objects.create(
            conference=self.conference, name='TT:Student', fee=100,
            is_active=True, date_valid_from=self.purchase_start,
            date_valid_to=self.purchase_end,
            vouchertype_needed=self.vt_student,
            content_type=self.ct_venueticket)
        self.tt_conf_standard = models.TicketType.objects.create(
            conference=self.conference, name='TT:Standard', fee=200,
            max_purchases=7, is_active=True,
            date_valid_from=self.purchase_start,
            date_valid_to=self.purchase_end,
            content_type=self.ct_venueticket)
        self.tt_conf_finaid = models.TicketType.objects.create(
            conference=self.conference, name='TT:FinAid', fee=0,
            is_active=True, date_valid_from=self.purchase_start,
            date_valid_to=self.purchase_end,
            vouchertype_needed=self.vt_fin_aid,
            content_type=self.ct_venueticket)
        self.tt_conf_ondesk = models.TicketType.objects.create(
            conference=self.conference, name='TT:OnDesk', fee=42,
            is_active=False, is_on_desk_active=True,
            date_valid_from=self.purchase_start,
            date_valid_to=self.purchase_end,
            vouchertype_needed=self.vt_fin_aid,
            content_type=self.ct_venueticket)
        self.tt_sim = models.TicketType.objects.create(
            conference=self.conference, name='TT:SIM', fee=12.34,
            is_active=True, date_valid_from=self.purchase_start,
            date_valid_to=self.purchase_end,
            content_type=self.ct_simcardticket)
        self.tt_support10 = models.TicketType.objects.create(
            conference=self.conference, name='TT:Support10', fee=10,
            is_active=True, date_valid_from=self.purchase_start,
            date_valid_to=self.purchase_end,
            content_type=self.ct_supportticket)
        self.tt_support50 = models.TicketType.objects.create(
            conference=self.conference, name='TT:Support50', fee=50,
            is_active=True, date_valid_from=self.purchase_start,
            date_valid_to=self.purchase_end,
            content_type=self.ct_supportticket)
        # setup the t-shirts sizes
        self.ts_mxl = models.TShirtSize.objects.create(
            conference=self.conference, size='TS:MaleXL', sort=2)
        self.ts_fm = models.TShirtSize.objects.create(
            conference=self.conference, size='TS:FemaleM', sort=1)

    def tearDown(self):
        for purchase in models.Purchase.objects.all():
            if path.exists(purchase.invoice_filepath):
                unlink(purchase.invoice_filepath)
        for klass in [models.Purchase, models.VenueTicket,
                      models.SIMCardTicket, models.SupportTicket,
                      models.Ticket, models.TShirtSize, models.TicketType,
                      models.Voucher, models.VoucherType, Conference]:
            for inst in klass.objects.all():
                inst.delete()

    def assertQuantityForm(self, response, ticket_type, limit):
        text = '<select id="id_tq-%d-quantity" name="tq-%d-quantity">' % (
            ticket_type.pk, ticket_type.pk)
        for i in range(0, limit + 1):
            text += '<option value="%d">%d</option>' % (i, i)
        text += '</select>'
        self.assertContains(response, text, html=True)

    def assertNameForm(self, response, ticket, ticket_klass):
        t_pk = ticket.pk
        self.assertIsInstance(ticket, ticket_klass)
        if isinstance(ticket, models.VenueTicket):
            self.assertContains(
                response,
                '<input id="id_tn-%d-first_name" maxlength="250" name="tn-%d-first_name" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_tn-%d-last_name" maxlength="250" name="tn-%d-last_name" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_tn-%d-organisation" maxlength="100" name="tn-%d-organisation" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<select id="id_tn-%d-shirtsize" name="tn-%d-shirtsize">'
                '<option value="" selected="selected">---------</option>'
                '<option value="%d">TS:FemaleM</option>'
                '<option value="%d">TS:MaleXL</option>'
                '</select>' % (t_pk, t_pk, self.ts_fm.pk, self.ts_mxl.pk),
                count=1, html=True)
        elif isinstance(ticket, models.SIMCardTicket):
            self.assertContains(
                response,
                '<select id="id_sc-%d-gender" name="sc-%d-gender">'
                '<option value="" selected="selected">---------</option>'
                '<option value="female">female</option>'
                '<option value="male">male</option>'
                '</select>' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_sc-%d-first_name" maxlength="250" name="sc-%d-first_name" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_sc-%d-last_name" maxlength="250" name="sc-%d-last_name" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_sc-%d-date_of_birth" name="sc-%d-date_of_birth" placeholder="yyyy-mm-dd" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_sc-%d-hotel_name" maxlength="100" name="sc-%d-hotel_name" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_sc-%d-email" maxlength="75" name="sc-%d-email" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_sc-%d-street" maxlength="100" name="sc-%d-street" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_sc-%d-zip_code" maxlength="20" name="sc-%d-zip_code" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_sc-%d-city" maxlength="100" name="sc-%d-city" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_sc-%d-country" maxlength="100" name="sc-%d-country" type="text" />' % (t_pk, t_pk),
                count=1, html=True)
            self.assertContains(
                response,
                '<input id="id_sc-%d-phone" maxlength="100" name="sc-%d-phone" type="text" />' % (t_pk, t_pk),
                count=1, html=True)

    def assertVoucherForm(self, response, ticket):
        t_pk = ticket.pk
        self.assertContains(
            response,
            '<input id="id_tv-%d-code" maxlength="12" name="tv-%d-code" type="text" />' % (t_pk, t_pk),
            count=1, html=True)

    def assertTicketInOverview(self, response, ticket_type, first_name,
                               last_name, amount, count=1):
        text = '<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>' % (
            ticket_type, first_name, last_name, amount)
        self.assertContains(response, text, count=count, html=True)

    @mock.patch('pyconde.attendees.utils.generate_invoice_number',
                new_callable=get_next_invoice_number)
    def test_valid_purchase_process(self, mock_gen_inv_nr):
        response = self.client.get(reverse('attendees_purchase'))
        # check we are on the start page
        self.assertContains(response, '<li class="active">Start</li>', html=True)
        # check for ticket type names:
        self.assertContains(response, 'TT:Student (100.00 EUR)')
        self.assertContains(response, 'TT:Standard (200.00 EUR)')
        self.assertContains(response, 'TT:FinAid (0.00 EUR)')
        self.assertContains(response, 'TT:SIM (12.34 EUR)')
        self.assertContains(response, 'TT:Support10 (10.00 EUR)')
        self.assertContains(response, 'TT:Support50 (50.00 EUR)')
        self.assertNotContains(response, 'TT:OnDesk')
        self.assertQuantityForm(response, self.tt_conf_student, 1)
        self.assertQuantityForm(response, self.tt_conf_standard, 7)
        self.assertQuantityForm(response, self.tt_conf_finaid, 1)
        self.assertQuantityForm(response, self.tt_sim, 10)
        self.assertQuantityForm(response, self.tt_support10, 10)
        self.assertQuantityForm(response, self.tt_support50, 10)
        self.assertNotContains(
            response,
            '<select id="id_tq-%d-quantity" name="tq-%d-quantity">' % (
                self.tt_conf_ondesk.pk, self.tt_conf_ondesk.pk))
        # TODO: Check for purchase form (billing address, etc.)
        # Post the quantity and purchase data
        data = {
            # quantities
            'tq-%d-quantity' % self.tt_conf_student.pk: 1,
            'tq-%d-quantity' % self.tt_conf_standard.pk: 2,
            'tq-%d-quantity' % self.tt_conf_finaid.pk: 1,
            'tq-%d-quantity' % self.tt_sim.pk: 2,
            'tq-%d-quantity' % self.tt_support10.pk: 0,
            'tq-%d-quantity' % self.tt_support50.pk: 4,
            # billing address
            'city': 'P:Berlin',
            'comments': 'P:SomeComment',
            'company_name': 'P:ExCom',
            'country': 'P:Germany',
            'email': 'purchase@example.com',
            'first_name': 'P:FirstName',
            'last_name': 'P:LastName',
            'street': 'P:Street 123',
            'vat_id': 'P:V4TID',
            'zip_code': 'P:Z1P-2345',
        }
        response = self.client.post(reverse('attendees_purchase'), data=data)
        self.assertRedirects(response, reverse('attendees_purchase_names'))
        # check for created tickets
        tickets = self.client.session['purchase_state']['tickets']
        self.assertIsInstance(tickets[0], models.VenueTicket)
        self.assertIsInstance(tickets[1], models.VenueTicket)
        self.assertIsInstance(tickets[2], models.VenueTicket)
        self.assertIsInstance(tickets[3], models.VenueTicket)
        self.assertIsInstance(tickets[4], models.SIMCardTicket)
        self.assertIsInstance(tickets[5], models.SIMCardTicket)
        self.assertIsInstance(tickets[6], models.SupportTicket)
        self.assertIsInstance(tickets[7], models.SupportTicket)
        self.assertIsInstance(tickets[8], models.SupportTicket)
        self.assertIsInstance(tickets[9], models.SupportTicket)
        for i in range(10):
            self.assertEqual(tickets[i].pk, i)  # Check for temp pk
        # TODO: check for created purchase object

        response = self.client.get(reverse('attendees_purchase_names'))
        # check we are on the names page
        self.assertContains(response, '<li class="active">Ticket info</li>',
                            html=True)
        # check for name forms labels
        self.assertContains(response, '<legend>1. Ticket (TT:Student)</legend>',
                            count=1, html=True)
        self.assertContains(response, '<legend>2. Ticket (TT:Standard)</legend>',
                            count=1, html=True)
        self.assertContains(response, '<legend>3. Ticket (TT:Standard)</legend>',
                            count=1, html=True)
        self.assertContains(response, '<legend>4. Ticket (TT:FinAid)</legend>',
                            count=1, html=True)
        self.assertContains(response, '<legend>Voucher</legend>',
                            count=1, html=True)
        self.assertContains(
            response,
            '<label for="" class="requiredField"> 1. TT:Student <span class="asteriskField">*</span></label>',
            count=1, html=True)
        self.assertContains(
            response,
            '<label for="" class="requiredField"> 2. TT:FinAid <span class="asteriskField">*</span></label>',
            count=1, html=True)
        self.assertContains(response, '<legend>SIM Card(s)</legend>',
                            count=1, html=True)
        self.assertContains(response, '<legend>1. TT:SIM</legend>',
                            count=1, html=True)
        self.assertContains(response, '<legend>2. TT:SIM</legend>',
                            count=1, html=True)
        # check for form fields
        tickets = self.client.session['purchase_state']['tickets']
        for i in range(4):
            self.assertNameForm(response, tickets[i], models.VenueTicket)
        for i in range(4, 6):
            self.assertNameForm(response, tickets[i], models.SIMCardTicket)
        self.assertVoucherForm(response, tickets[0])  # Student
        self.assertVoucherForm(response, tickets[3])  # FinAid

        # Post the ticket names
        data = {}
        shirtsizes = (self.ts_fm.pk, self.ts_mxl.pk)
        for i in range(4):
            data.update({
                'tn-%d-first_name' % tickets[i].pk: 'TN:%d:FirstName' % i,
                'tn-%d-last_name' % tickets[i].pk: 'TN:%d:LastName' % i,
                'tn-%d-organisation' % tickets[i].pk: 'TN:%d:Organisation' % i,
                'tn-%d-shirtsize' % tickets[i].pk: shirtsizes[i % 2],
            })
        for i in range(4, 6):
            data.update({
                'sc-%d-gender' % tickets[i].pk: (i % 2) and 'male' or 'female',
                'sc-%d-first_name' % tickets[i].pk: 'SC:%d:FirstName' % i,
                'sc-%d-last_name' % tickets[i].pk: 'SC:%d:LastName' % i,
                'sc-%d-date_of_birth' % tickets[i].pk: '2014-0%d-0%d' % (i, i),
                'sc-%d-hotel_name' % tickets[i].pk: 'SC:%d:HotelName' % i,
                'sc-%d-email' % tickets[i].pk: 'sc-%d@example.com' % i,
                'sc-%d-street' % tickets[i].pk: 'SC:%d:Street %d' % (i, i),
                'sc-%d-zip_code' % tickets[i].pk: 'SC:%d:ZIP' % i,
                'sc-%d-city' % tickets[i].pk: 'SC:%d:City' % i,
                'sc-%d-country' % tickets[i].pk: 'SC:%d:Country' % i,
                'sc-%d-phone' % tickets[i].pk: 'SC:%d:Phone' % i,
            })
        data.update({
            'tv-0-code': self.v_student.code,
            'tv-3-code': self.v_fin_aid.code,
        })
        response = self.client.post(reverse('attendees_purchase_names'), data=data)
        self.assertRedirects(response, reverse('attendees_purchase_confirm'))
        # TODO: check ticket data

        response = self.client.get(reverse('attendees_purchase_confirm'))
        # check we are on the confirmation page
        self.assertContains(response, '<li class="active">Overview</li>', html=True)
        # check for ticket list
        self.assertTicketInOverview(response, 'TT:Student', 'TN:0:FirstName',
                                    'TN:0:LastName', '100.00 EUR')
        self.assertTicketInOverview(response, 'TT:Standard', 'TN:1:FirstName',
                                    'TN:1:LastName', '200.00 EUR')
        self.assertTicketInOverview(response, 'TT:Standard', 'TN:2:FirstName',
                                    'TN:2:LastName', '200.00 EUR')
        self.assertTicketInOverview(response, 'TT:FinAid', 'TN:3:FirstName',
                                    'TN:3:LastName', '0.00 EUR')
        self.assertTicketInOverview(response, 'TT:SIM', 'SC:4:FirstName',
                                    'SC:4:LastName', '12.34 EUR')
        self.assertTicketInOverview(response, 'TT:SIM', 'SC:5:FirstName',
                                    'SC:5:LastName', '12.34 EUR')
        self.assertTicketInOverview(response, 'TT:Support50', '', '',
                                    '50.00 EUR', count=4)
        # check for billing address and total
        self.assertContains(
            response,
            '<p>'
            'P:ExCom<br />'
            'P:FirstName P:LastName<br />'
            'P:Street 123<br />'
            'P:Z1P-2345 P:Berlin<br />'
            'P:Germany<br />'
            '</p>',
            count=1, html=True)
        self.assertContains(response, '<td>724.68 EUR</td>', count=1, html=True)

        data = {
            'accept_terms': True,
            'payment_method': 'invoice',
        }
        response = self.client.post(reverse('attendees_purchase_confirm'),
                                    data=data, follow=True)
        # TODO: check persisted ticket data
        # check we are on the completion page
        self.assertContains(response, '<li class="active">Complete</li>', html=True)

    def test_information_restoration_from_step2_to_step1(self):
        """
        When the user goes back from the "names" page to the start page all
        the information that had been provided there should once again be
        available.
""" response = self.client.get(reverse('attendees_purchase')) # check we are on the start page self.assertContains(response, '<li class="active">Start</li>', html=True) # Now let's fill out the form and move one to the second page. purchase_data = { 'first_name': u'FirstName', 'last_name': u'LastName', 'company_name': u'Company', 'email': u'email@test.com', 'street': u'StreetNameAndNumber', 'zip_code': u'8010', 'country': u'Austria', 'city': u'Graz', 'vat_id': u'123', 'comments': u'Some comments', } ticket_data = { 'tq-1-quantity': 1, 'tq-2-quantity': 0, 'tq-3-quantity': 0, 'tq-4-quantity': 0, 'tq-5-quantity': 0, 'tq-6-quantity': 0, } form_data = {} form_data.update(purchase_data) form_data.update(ticket_data) response = self.client.post(reverse('attendees_purchase'), data=form_data, follow=True) self.assertContains(response, '<li class="active">Ticket info</li>', html=True) # Now we go back to the start page and make ensure that the data # we entered before is still there. response = self.client.get(reverse('attendees_purchase')) self.maxDiff = None self.assertEqual(purchase_data, response.context['form'].initial) for idx, qty in enumerate([1, None, None, None, None, None]): self.assertEqual(qty, response.context['quantity_forms'][idx].initial['quantity']) class TestPurchaseModel(TestCase): def setUp(self): now = datetime.datetime.now() ct = ContentType.objects.get(app_label='attendees', model='venueticket') self.included = models.TicketType.objects.create( name="included", fee=100, date_valid_from=now - datetime.timedelta(days=-1), date_valid_to=now + datetime.timedelta(days=1), content_type=ct) self.included_free = models.TicketType.objects.create( name="included_free", fee=0, date_valid_from=now - datetime.timedelta(days=-1), date_valid_to=now + datetime.timedelta(days=1), content_type=ct) self.excluded = models.TicketType.objects.create( name="excluded", fee=200, prevent_invoice=True, date_valid_from=now - datetime.timedelta(days=-1), date_valid_to=now + datetime.timedelta(days=1), content_type=ct) self.excluded_free = models.TicketType.objects.create( name="excluded_free", fee=0, prevent_invoice=True, date_valid_from=now - datetime.timedelta(days=-1), date_valid_to=now + datetime.timedelta(days=1), content_type=ct) self.purchase_data = { 'first_name': 'Max', 'last_name': 'Mustermann', 'email': 'max@mustermann.de', 'street': 'Musterstraße', 'zip_code': 12345, 'city': 'Musterhausen', 'country': 'Musterland', } def test_send_invoice_to_user_invoice_incl_excl(self): purchase = models.Purchase.objects.create( payment_method='invoice', payment_total=300, **self.purchase_data) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.included) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.excluded) self.assertTrue(purchase.send_invoice_to_user) def test_send_invoice_to_user_invoice_incl_exclfree(self): purchase = models.Purchase.objects.create( payment_method='invoice', payment_total=100, **self.purchase_data) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.included) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.excluded_free) self.assertTrue(purchase.send_invoice_to_user) def test_send_invoice_to_user_invoice_inclfree_excl(self): purchase = models.Purchase.objects.create( payment_method='invoice', payment_total=200, **self.purchase_data) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.included_free) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.excluded) 
self.assertTrue(purchase.send_invoice_to_user) def test_send_invoice_to_user_invoice_inclfree_exclfree(self): purchase = models.Purchase.objects.create( payment_method='invoice', payment_total=0, **self.purchase_data) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.included_free) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.excluded_free) self.assertTrue(purchase.send_invoice_to_user) def test_send_invoice_to_user_invoice_incl(self): purchase = models.Purchase.objects.create( payment_method='invoice', payment_total=100, **self.purchase_data) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.included) self.assertTrue(purchase.send_invoice_to_user) def test_send_invoice_to_user_invoice_inclfree(self): purchase = models.Purchase.objects.create( payment_method='invoice', payment_total=0, **self.purchase_data) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.included_free) self.assertTrue(purchase.send_invoice_to_user) def test_send_invoice_to_user_invoice_excl(self): purchase = models.Purchase.objects.create( payment_method='invoice', payment_total=200, **self.purchase_data) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.excluded) self.assertTrue(purchase.send_invoice_to_user) def test_send_invoice_to_user_invoice_exclfree(self): purchase = models.Purchase.objects.create( payment_method='invoice', payment_total=0, **self.purchase_data) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.excluded_free) self.assertFalse(purchase.send_invoice_to_user) def test_send_invoice_to_user_creditcard_excl(self): purchase = models.Purchase.objects.create( payment_method='creditcard', payment_total=200, **self.purchase_data) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.excluded) self.assertTrue(purchase.send_invoice_to_user) def test_send_invoice_to_user_creditcard_exclfree(self): purchase = models.Purchase.objects.create( payment_method='creditcard', payment_total=0, **self.purchase_data) models.VenueTicket.objects.create(purchase=purchase, ticket_type=self.excluded_free) self.assertTrue(purchase.send_invoice_to_user) class TestTicketTypeModel(TestCase): def setUp(self): self.vt_ct = ContentType.objects.get(app_label='attendees', model='venueticket') self.now = datetime.datetime.now() self.venue_ticket_type = models.TicketType.objects.create(name="test", fee=100, date_valid_from=self.now - datetime.timedelta(days=-1), date_valid_to=self.now + datetime.timedelta(days=1), editable_fields='shirtsize', content_type=self.vt_ct) def tearDown(self): self.venue_ticket_type.delete() def test_returns_all_tickettypes(self): """ All subclasses of attendees.ticket should provided by limit_ticket_types including ticket itself. """ from django.db.models import get_models expected = set() found = set() for model in get_models(): if issubclass(model, models.Ticket): expected.add("{0}.{1}".format( model._meta.app_label, model.__name__.lower())) for node in models.limit_ticket_types().children: values = dict(node.children) found.add("{0}.{1}".format( values['app_label'], values['model'])) self.assertEqual(expected, found) def test_get_editable_fields_empty(self): """ If the field is empty, an empty list should be returned. """ self.assertEqual([], models.TicketType(editable_fields="").get_editable_fields()) def test_get_editable_fields_default(self): """ By default no fields should be marked as editable. 
""" self.assertEqual([], models.TicketType().get_editable_fields()) def test_get_editable_fields_spaces(self): """ The list of editable fields should be specified as a comma-separated string with arbitrary spaces. """ expected = ["a", "b", "c"] inputs = [ "a, b, c", " a,b ,c", "a,b,c," ] for input in inputs: self.assertEquals(expected, models.TicketType(editable_fields=input).get_editable_fields()) def test_get_readonly_fields(self): fields = set(self.venue_ticket_type.get_readonly_fields()) expected = set(['first_name', 'last_name', 'organisation', 'dietary_preferences']) self.assertEqual(expected, fields) def test_clean_editable_fields_with_unknown_field(self): self.venue_ticket_type.editable_fields = 'unknown' with self.assertRaises(ValidationError): self.venue_ticket_type.clean() def test_clean_editable_fields_with_valid_field(self): self.venue_ticket_type.editable_fields = 'first_name' self.venue_ticket_type.clean() def test_querysets(self): # too early (not yet on sale) models.TicketType.objects.create(name="too early", fee=1, date_valid_from=self.now + datetime.timedelta(days=1), date_valid_to=self.now + datetime.timedelta(days=2), editable_fields='shirtsize', is_active=True, content_type=self.vt_ct) # too late (not on sale anymore) models.TicketType.objects.create(name="too late", fee=1, date_valid_from=self.now - datetime.timedelta(days=2), date_valid_to=self.now - datetime.timedelta(days=1), editable_fields='shirtsize', is_active=True, content_type=self.vt_ct) # not active models.TicketType.objects.create(name="not active", fee=1, date_valid_from=self.now - datetime.timedelta(days=1), date_valid_to=self.now + datetime.timedelta(days=1), editable_fields='shirtsize', is_active=False, content_type=self.vt_ct) # active active = models.TicketType.objects.create(name="active", fee=1, date_valid_from=self.now - datetime.timedelta(days=1), date_valid_to=self.now + datetime.timedelta(days=1), editable_fields='shirtsize', is_active=True, content_type=self.vt_ct) # active and on-desk active_ondesk = models.TicketType.objects.create(name="active / ondesk", fee=1, date_valid_from=self.now - datetime.timedelta(days=1), date_valid_to=self.now + datetime.timedelta(days=1), editable_fields='shirtsize', is_active=True, is_on_desk_active=True, content_type=self.vt_ct) # on-desk on_desk = models.TicketType.objects.create(name="ondesk", fee=1, date_valid_from=self.now - datetime.timedelta(days=1), date_valid_to=self.now + datetime.timedelta(days=1), editable_fields='shirtsize', is_on_desk_active=True, content_type=self.vt_ct) # Test tickets for normal purchase process self.assertEqual(list(models.TicketType.objects.available()), [active, active_ondesk]) # Test tickets for on-desk 'checkin' purchase process self.assertEqual(list(models.TicketType.objects.filter_ondesk()), [active_ondesk, on_desk]) class TestTicketModel(TestCase): def test_ticket_editable_if_enabled_on_tickettype(self): user = get_user_model()() conference = Conference(tickets_editable=True) ticket_type = models.TicketType(allow_editing=True, conference=conference) ticket = models.Ticket(ticket_type=ticket_type, user=user) self.assertTrue(ticket.can_be_edited_by(user)) def test_ticket_editable_if_enabled_on_conference(self): user = get_user_model()() conference = Conference(tickets_editable=True) ticket_type = models.TicketType(allow_editing=None, conference=conference) ticket = models.Ticket(ticket_type=ticket_type, user=user) self.assertTrue(ticket.can_be_edited_by(user)) def 
test_ticket_editable_if_enabled_on_ticket_and_disabled_on_conference(self): user = get_user_model()() conference = Conference(tickets_editable=False) ticket_type = models.TicketType(allow_editing=True, conference=conference) ticket = models.Ticket(ticket_type=ticket_type, user=user) self.assertTrue(ticket.can_be_edited_by(user)) def test_ticket_not_editable_if_disabled_on_ticket(self): user = get_user_model()() conference = Conference(tickets_editable=True) ticket_type = models.TicketType(allow_editing=False, conference=conference) ticket = models.Ticket(ticket_type=ticket_type, user=user) self.assertFalse(ticket.can_be_edited_by(user)) def test_ticket_not_editable_if_disabled_on_conference(self): user = get_user_model()() conference = Conference(tickets_editable=False) ticket_type = models.TicketType(allow_editing=None, conference=conference) ticket = models.Ticket(ticket_type=ticket_type, user=user) self.assertFalse(ticket.can_be_edited_by(user)) def test_ticket_not_editable_if_time_over_on_tickettype(self): user = get_user_model()() now = datetime.datetime.now() conference = Conference(tickets_editable=True) ticket_type = models.TicketType(allow_editing=True, editable_until=now + datetime.timedelta(days=-1), conference=conference) ticket = models.Ticket(ticket_type=ticket_type, user=user) self.assertFalse(ticket.can_be_edited_by(user, current_time=now)) def test_ticket_not_editable_if_time_over_on_conference(self): user = get_user_model()() now = datetime.datetime.now() conference = Conference(tickets_editable=True, tickets_editable_until=now + datetime.timedelta(days=-1)) ticket_type = models.TicketType(allow_editing=True, conference=conference) ticket = models.Ticket(ticket_type=ticket_type, user=user) self.assertFalse(ticket.can_be_edited_by(user, current_time=now)) class TestVenueTicketModel(TestCase): def test_get_fields(self): expected = set(['first_name', 'last_name', 'organisation', 'shirtsize', 'dietary_preferences']) fields = models.VenueTicket.get_fields() self.assertEqual(expected, fields) class TestSupportTicketModel(TestCase): def test_get_fields(self): expected = set() fields = models.SupportTicket.get_fields() self.assertEqual(expected, fields)
pysv/djep
pyconde/attendees/tests.py
Python
bsd-3-clause
43,383
#!/usr/bin/env python2
# Copyright (c) 2015 The VCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#
# Test PrioritiseTransaction code
#

from test_framework.test_framework import VCoinTestFramework
from test_framework.util import *

COIN = 100000000


class PrioritiseTransactionTest(VCoinTestFramework):

    def __init__(self):
        self.txouts = gen_return_txouts()

    def setup_chain(self):
        print("Initializing test directory " + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 1)

    def setup_network(self):
        self.nodes = []
        self.is_network_split = False

        self.nodes.append(start_node(0, self.options.tmpdir,
                                     ["-debug", "-printpriority=1"]))
        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']

    def run_test(self):
        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 90)
        base_fee = self.relayfee * 100  # our transactions are smaller than 100kb
        txids = []

        # Create 3 batches of transactions at 3 different fee rate levels
        for i in xrange(3):
            txids.append([])
            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts,
                                                       utxos[30*i:30*i+30],
                                                       (i+1)*base_fee)

        # add a fee delta to something in the cheapest bucket and make sure it gets mined
        # also check that a different entry in the cheapest bucket is NOT mined (lower
        # the priority to ensure it's not mined due to priority)
        self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN))
        self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0)

        self.nodes[0].generate(1)

        mempool = self.nodes[0].getrawmempool()
        print "Assert that prioritised transaction was mined"
        assert(txids[0][0] not in mempool)
        assert(txids[0][1] in mempool)

        high_fee_tx = None
        for x in txids[2]:
            if x not in mempool:
                high_fee_tx = x

        # Something high-fee should have been mined!
        assert(high_fee_tx != None)

        # Add a prioritisation before a tx is in the mempool (de-prioritising a
        # high-fee transaction).
        self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN))

        # Add everything back to mempool
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # Check to make sure our high fee rate tx is back in the mempool
        mempool = self.nodes[0].getrawmempool()
        assert(high_fee_tx in mempool)

        # Now verify the high feerate transaction isn't mined.
        self.nodes[0].generate(5)

        # High fee transaction should not have been mined, but other high fee rate
        # transactions should have been.
        mempool = self.nodes[0].getrawmempool()
        print "Assert that de-prioritised transaction is still in mempool"
        assert(high_fee_tx in mempool)
        for x in txids[2]:
            if (x != high_fee_tx):
                assert(x not in mempool)

        # Create a free, low priority transaction.  Should be rejected.
        utxo_list = self.nodes[0].listunspent()
        assert(len(utxo_list) > 0)
        utxo = utxo_list[0]

        inputs = []
        outputs = {}
        inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
        raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
        tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
        txid = self.nodes[0].sendrawtransaction(tx_hex)

        # A tx that spends an in-mempool tx has 0 priority, so we can use it to
        # test the effect of using prioritise transaction for mempool acceptance
        inputs = []
        inputs.append({"txid": txid, "vout": 0})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
        raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"]
        tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"]

        try:
            self.nodes[0].sendrawtransaction(tx2_hex)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26)  # insufficient fee
            assert(tx2_id not in self.nodes[0].getrawmempool())
        else:
            assert(False)

        # This is a less than 1000-byte transaction, so just set the fee
        # to be the minimum for a 1000 byte transaction and check that it is
        # accepted.
        self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN))

        print "Assert that prioritised free transaction is accepted to mempool"
        assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id)
        assert(tx2_id in self.nodes[0].getrawmempool())


if __name__ == '__main__':
    PrioritiseTransactionTest().main()
vcoin-project/v
qa/rpc-tests/prioritise_transaction.py
Python
mit
5,037
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Mzero - XBMC Plugin
# http://blog.tvalacarta.info/plugin-xbmc/Mzero/
# ------------------------------------------------------------

import os
import re
import sys
import urlparse

from core import config
from core import jsontools
from core import logger
from core import scrapertools
from core.item import Item

DEBUG = config.get_setting("debug")

CHANNEL_HOST = "http://animeflv.net/"

CHANNEL_DEFAULT_HEADERS = [
    ["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:22.0) Gecko/20100101 Firefox/22.0"],
    ["Accept-Encoding", "gzip, deflate"],
    ["Referer", CHANNEL_HOST]
]

'''
### FOR USE WITH TRAKT.TV ###
season: must be listed in descending order
episode: "season 1" is always "0 episodes"; "season 2" is "the number of
episodes in season 1", and so on cumulatively.

FAIRY TAIL:
 - SEASON 1: EPISODE 48  --> [season 1, episode: 0]
 - SEASON 2: EPISODE 48  --> [season 2, episode: 48]
 - SEASON 3: EPISODE 54  --> [season 3, episode: 96 ( [48=season2] + [48=season1] )]
 - SEASON 4: EPISODE 175 --> [season 4, episode: 150 ( [54=season3] + [48=season2] + [48=season1] )]

animeflv_data.json
{
    "TVSHOW_RENUMBER": {
        "Fairy Tail": {
            "season_episode": [
                [4, 150],
                [3, 96],
                [2, 48],
                [1, 0]
            ]
        },
        "Fairy Tail (2014)": {
            "season_episode": [
                [6, 51],
                [5, 0]
            ]
        }
    }
}
'''


def mainlist(item):
    logger.info("Mzero.channels.animeflv mainlist")
    itemlist = list([])
    itemlist.append(Item(channel=item.channel, action="novedades_episodios",
                         title="Últimos episodios", url=CHANNEL_HOST,
                         viewmode="movie"))
    itemlist.append(Item(channel=item.channel, action="menuseries", title="Series",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/?orden=nombre&mostrar=series")))
    itemlist.append(Item(channel=item.channel, action="menuovas", title="OVAS",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/?orden=nombre&mostrar=ovas")))
    itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/?orden=nombre&mostrar=peliculas")))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/?buscar=")))
    return itemlist


def menuseries(item):
    logger.info("Mzero.channels.animeflv menuseries")
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="letras", title="Por orden alfabético",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/?orden=nombre&mostrar=series")))
    itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/?orden=nombre&mostrar=series")))
    itemlist.append(Item(channel=item.channel, action="series", title="En emisión",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/en-emision/?orden=nombre&mostrar=series"),
                         viewmode="movies_with_plot"))
    return itemlist


def menuovas(item):
    logger.info("Mzero.channels.animeflv menuovas")
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="letras", title="Por orden alfabético",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/?orden=nombre&mostrar=ovas")))
    itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/?orden=nombre&mostrar=ovas")))
    itemlist.append(Item(channel=item.channel, action="series", title="En emisión",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/en-emision/?orden=nombre&mostrar=ovas"),
                         viewmode="movies_with_plot"))
    return itemlist


def menupeliculas(item):
    logger.info("Mzero.channels.animeflv menupeliculas")
    itemlist = list()
    itemlist.append(Item(channel=item.channel, action="letras", title="Por orden alfabético",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/?orden=nombre&mostrar=peliculas")))
    itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/?orden=nombre&mostrar=peliculas")))
    itemlist.append(Item(channel=item.channel, action="series", title="En emisión",
                         url=urlparse.urljoin(CHANNEL_HOST, "animes/en-emision/?orden=nombre&mostrar=peliculas"),
                         viewmode="movies_with_plot"))
    return itemlist


def letras(item):
    logger.info("Mzero.channels.animeflv letras")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST)
    data = scrapertools.get_match(data, '<div class="alfabeto_box"(.*?)</div>')

    patron = '<a href="([^"]+)[^>]+>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.entityunescape(scrapedtitle)
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        if DEBUG:
            logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail))
        itemlist.append(Item(channel=item.channel, action="series", title=title, url=url,
                             thumbnail=thumbnail, plot=plot, viewmode="movies_with_plot"))

    return itemlist


def generos(item):
    logger.info("Mzero.channels.animeflv generos")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST)
    data = scrapertools.get_match(data, '<div class="generos_box"(.*?)</div>')

    patron = '<a href="([^"]+)[^>]+>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.entityunescape(scrapedtitle)
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        if DEBUG:
            logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail))
        itemlist.append(Item(channel=item.channel, action="series", title=title, url=url,
                             thumbnail=thumbnail, plot=plot, viewmode="movies_with_plot"))

    return itemlist


def search(item, texto):
    logger.info("Mzero.channels.animeflv search")
    if item.url == "":
        item.url = urlparse.urljoin(CHANNEL_HOST, "animes/?buscar=")
    texto = texto.replace(" ", "+")
    item.url = "{0}{1}".format(item.url, texto)
    return series(item)


def newest(categoria):
    itemlist = []
    if categoria == 'anime':
        itemlist = novedades_episodios(Item(url="http://animeflv.net/"))
    return itemlist


def novedades_episodios(item):
    logger.info("Mzero.channels.animeflv novedades")

    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST)

    '''
    <div class="not">
        <a href="/ver/cyclops-shoujo-saipu-12.html" title="Cyclops Shoujo Saipu 12">
            <img class="imglstsr lazy" src="http://cdn.animeflv.net/img/mini/957.jpg" border="0">
            <span class="tit_ep"><span class="tit">Cyclops Shoujo Saipu 12</span></span>
        </a>
    </div>
    '''
    patronvideos = '<div class="not"[^<]+<a href="([^"]+)" title="([^"]+)"[^<]+<img class="[^"]+" ' \
                   'src="([^"]+)"[^<]+<span class="tit_ep"><span class="tit">([^<]+)<'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    itemlist = []

    for match in matches:
        scrapedtitle = scrapertools.entityunescape(match[3])
        fulltitle = scrapedtitle
        # directory = match[1]
        scrapedurl = urlparse.urljoin(item.url, match[0])
        scrapedthumbnail = urlparse.urljoin(item.url, match[2].replace("mini", "portada"))
        scrapedplot = ""
        #if DEBUG: logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format(scrapedtitle, scrapedurl, scrapedthumbnail))

        new_item = Item(channel=item.channel, action="findvideos", title=scrapedtitle,
                        url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot,
                        fulltitle=fulltitle)

        content_title = scrapertools.entityunescape(match[1])
        if content_title:
            episode = scrapertools.get_match(content_title, '\s+(\d+)$')
            content_title = content_title.replace(episode, '')
            season, episode = numbered_for_tratk(content_title, 1, episode)
            new_item.hasContentDetails = "true"
            new_item.contentTitle = content_title
            new_item.contentSeason = season
            new_item.contentEpisodeNumber = int(episode)

        itemlist.append(new_item)

    return itemlist


def series(item):
    logger.info("Mzero.channels.animeflv series")

    data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST)

    '''
    <div class="aboxy_lista">
        <a href="/ova/nurarihyon-no-mago-ova.html" title="Nurarihyon no Mago OVA">
            <img class="lazy portada" src="/img/blank.gif" data-original="http://cdn.animeflv.net/img/portada/1026.jpg"
                 alt="Nurarihyon no Mago OVA"/>
        </a>
        <span style="float: right; margin-top: 0px;" class="tipo_1"></span>
        <a href="/ova/nurarihyon-no-mago-ova.html" title="Nurarihyon no Mago OVA" class="titulo">
            Nurarihyon no Mago OVA
        </a>
        <div class="generos_links">
            <b>Generos:</b>
            <a href="/animes/genero/accion/">Acci&oacute;n</a>,
            <a href="/animes/genero/shonen/">Shonen</a>,
            <a href="/animes/genero/sobrenatural/">Sobrenatural</a>
        </div>
        <div class="sinopsis">
            La historia empieza en alrededor de 100 a&ntilde;os despu&eacute;s de la desaparici&oacute;n de
            Yamabuki Otome, la primera esposa Rihan Nura. Rihan por fin recobr&oacute; la compostura y la vida
            vuelve a la normalidad. A medida que la cabeza del Clan Nura, est&aacute; ocupado trabajando en la
            construcci&oacute;n de un mundo armonioso para los seres humanos y youkai. Un d&iacute;a, &eacute;l
            ve a Setsura molesta por lo que decide animarla tomando el clan para ir a disfrutar de las aguas
            termales &hellip;
        </div>
    </div>
    '''
    patron = '<div class="aboxy_lista"[^<]+'
    patron += '<a href="([^"]+)"[^<]+<img class="[^"]+" src="[^"]+" data-original="([^"]+)"[^<]+</a[^<]+'
    patron += '<span[^<]+</span[^<]+'
    patron += '<a[^>]+>([^<]+)</a.*?'
patron += '<div class="sinopsis">(.*?)</div' matches = re.compile(patron, re.DOTALL).findall(data) itemlist = [] for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches: title = scrapertools.unescape(scrapedtitle) fulltitle = title url = urlparse.urljoin(item.url, scrapedurl) thumbnail = urlparse.urljoin(item.url, scrapedthumbnail) plot = scrapertools.htmlclean(scrapedplot) show = title #if DEBUG:logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail)) itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot, show=show, fulltitle=fulltitle, fanart=thumbnail, folder=True)) patron = '<a href="([^"]+)">\&raquo\;</a>' matches = re.compile(patron, re.DOTALL).findall(data) for match in matches: if len(matches) > 0: scrapedurl = urlparse.urljoin(item.url, match) scrapedtitle = ">> Pagina Siguiente" scrapedthumbnail = "" scrapedplot = "" itemlist.append(Item(channel=item.channel, action="series", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True, viewmode="movies_with_plot")) return itemlist def episodios(item): logger.info("Mzero.channels.animeflv episodios") itemlist = [] data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST) ''' <div class="tit">Listado de episodios <span class="fecha_pr">Fecha Pr&oacute;ximo: 2013-06-11</span></div> <ul class="anime_episodios" id="listado_epis"> <li><a href="/ver/aiura-9.html">Aiura 9</a></li> <li><a href="/ver/aiura-8.html">Aiura 8</a></li> <li><a href="/ver/aiura-7.html">Aiura 7</a></li> <li><a href="/ver/aiura-6.html">Aiura 6</a></li> <li><a href="/ver/aiura-5.html">Aiura 5</a></li> <li><a href="/ver/aiura-4.html">Aiura 4</a></li> <li><a href="/ver/aiura-3.html">Aiura 3</a></li> <li><a href="/ver/aiura-2.html">Aiura 2</a></li> <li><a href="/ver/aiura-1.html">Aiura 1</a></li> </ul> ''' data = scrapertools.find_single_match(data, '<div class="tit">Listado de episodios.*?</div>(.*?)</ul>') patron = '<li><a href="([^"]+)">([^<]+)</a></li>' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: title = scrapertools.unescape(scrapedtitle) url = urlparse.urljoin(item.url, scrapedurl) thumbnail = item.thumbnail plot = item.plot # TODO crear funcion que pasandole el titulo y buscando en un array de series establezca el valor el nombre # y temporada / capitulo para que funcione con trak.tv season = 1 episode = 1 patron = re.escape(item.show) + "\s+(\d+)" # logger.info("title {0}".format(title)) # logger.info("patron {0}".format(patron)) try: episode = scrapertools.get_match(title, patron) episode = int(episode) # logger.info("episode {0}".format(episode)) except IndexError: pass except ValueError: pass episode_title = scrapertools.find_single_match(title, "\d+:\s*(.*)") if episode_title == "": episode_title = "Episodio "+str(episode) season, episode = numbered_for_tratk(item.show, season, episode) if len(str(episode)) == 1: title = str(season)+"x0"+str(episode) else: title = str(season)+"x"+str(episode) title = item.show+" - "+title+" "+episode_title #if DEBUG: logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format(title, url, thumbnail)) itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, show=item.show, fulltitle="{0} {1}" .format(item.show, title), fanart=thumbnail, viewmode="movies_with_plot", folder=True)) if config.get_library_support() and len(itemlist) > 0: 
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show)) itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show)) return itemlist def findvideos(item): logger.info("Mzero.channels.animeflv findvideos") data = scrapertools.anti_cloudflare(item.url, headers=CHANNEL_DEFAULT_HEADERS, host=CHANNEL_HOST) # if 'infoLabels' in item: # del item.infoLabels url_anterior = scrapertools.find_single_match(data, '<a href="(/ver/[^"]+)".+?prev.png') url_siguiente = scrapertools.find_single_match(data, '<a href="(/ver/[^"]+)"[^.]+next.png') data = scrapertools.get_match(data, "var videos \= (.*?)$") itemlist = [] data = data.replace("\\\\", "") data = data.replace("\\/", "/") logger.info("data="+data) from core import servertools itemlist.extend(servertools.find_video_items(data=data)) for videoitem in itemlist: videoitem.channel = item.channel videoitem.show = item.show videoitem.folder = False if url_anterior: title_anterior = url_anterior.replace("/ver/", '').replace('-', ' ').replace('.html', '') itemlist.append(Item(channel=item.channel, action="findvideos", title="Anterior: " + title_anterior, url=CHANNEL_HOST + url_anterior, thumbnail=item.thumbnail, plot=item.plot, show=item.show, fanart=item.thumbnail, folder=True)) if url_siguiente: title_siguiente = url_siguiente.replace("/ver/", '').replace('-', ' ').replace('.html', '') itemlist.append(Item(channel=item.channel, action="findvideos", title="Siguiente: " + title_siguiente, url=CHANNEL_HOST + url_siguiente, thumbnail=item.thumbnail, plot=item.plot, show=item.show, fanart=item.thumbnail, folder=True)) return itemlist def numbered_for_tratk(show, season, episode): """ Devuelve la temporada y episodio convertido para que se marque correctamente en tratk.tv :param show: Nombre de la serie a comprobar :type show: str :param season: Temporada que devuelve el scrapper :type season: int :param episode: Episodio que devuelve el scrapper :type episode: int :return: season, episode :rtype: int, int """ logger.info("Mzero.channels.animeflv numbered_for_tratk") show = show.lower() new_season = season new_episode = episode dict_series = {} name_file = os.path.splitext(os.path.basename(__file__))[0] fname = os.path.join(config.get_data_path(), "settings_channels", name_file + "_data.json") if os.path.isfile(fname): data = "" try: f = open(fname, "r") for line in f: data += line f.close() except EnvironmentError: logger.info("ERROR al leer el archivo: {0}".format(fname)) json_data = jsontools.load_json(data) if 'TVSHOW_RENUMBER' in json_data: dict_series = json_data['TVSHOW_RENUMBER'] # ponemos en minusculas el key, ya que previamente hemos hecho lo mismo con show. for key in dict_series.keys(): new_key = key.lower() if new_key != key: dict_series[new_key] = dict_series[key] del dict_series[key] if show in dict_series: logger.info("ha encontrado algo: "+str(dict_series[show])) if len(dict_series[show]['season_episode']) > 1: for row in dict_series[show]['season_episode']: if new_episode > row[1]: new_episode -= row[1] new_season = row[0] break else: new_season = dict_series[show]['season_episode'][0][0] new_episode += dict_series[show]['season_episode'][0][1] logger.info("Mzero.channels.animeflv numbered_for_tratk: "+str(new_season)+":"+str(new_episode)) return new_season, new_episode
Mzero2010/MaxZone
plugin.video.Mzero/channels/animeflv.py
Python
gpl-3.0
19,445
from setuptools import setup, find_packages
import sys
import os
import glob
import configparser
import re

conf = []
templates = []

long_description = '''EasyEngine is the commandline tool to manage your Websites based on WordPress and Nginx with easy to use commands'''

for name in glob.glob('config/plugins.d/*.conf'):
    conf.insert(1, name)

for name in glob.glob('ee/cli/templates/*.mustache'):
    templates.insert(1, name)

if not os.path.exists('/var/log/ee/'):
    os.makedirs('/var/log/ee/')

if not os.path.exists('/var/lib/ee/'):
    os.makedirs('/var/lib/ee/')

# EasyEngine git function
config = configparser.ConfigParser()
config.read(os.path.expanduser("~") + '/.gitconfig')

try:
    ee_user = config['user']['name']
    ee_email = config['user']['email']
except Exception as e:
    print("EasyEngine (ee) requires your name & email address to track"
          " changes you made under the Git version control")
    print("EasyEngine (ee) will be able to send you daily reports & alerts in "
          "upcoming version")
    print("EasyEngine (ee) will NEVER send your information across")
    ee_user = input("Enter your name: ")
    while ee_user == "":
        print("Name not valid, please enter again")
        ee_user = input("Enter your name: ")
    ee_email = input("Enter your email: ")
    while not re.match(r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$",
                       ee_email):
        print("Invalid email address, please try again")
        ee_email = input("Enter your email: ")
    os.system("git config --global user.name {0}".format(ee_user))
    os.system("git config --global user.email {0}".format(ee_email))

setup(name='ee',
      version='3.3.6',
      description=long_description,
      long_description=long_description,
      classifiers=[],
      keywords='',
      author='rtCamp Solutions Pvt. Ltd.',
      author_email='ee@rtcamp.com',
      url='http://rtcamp.com/easyengine',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests',
                                      'templates']),
      include_package_data=True,
      zip_safe=False,
      test_suite='nose.collector',
      install_requires=[
          # Required to build documentation
          # "Sphinx >= 1.0",
          # Required for testing
          # "nose",
          # "coverage",
          # Required to function
          'cement == 2.4',
          'pystache',
          'python-apt',
          'pynginxconfig',
          'pymysql3 == 0.4',
          'psutil == 3.1.1',
          'sh',
          'sqlalchemy',
      ],
      data_files=[('/etc/ee', ['config/ee.conf']),
                  ('/etc/ee/plugins.d', conf),
                  ('/usr/lib/ee/templates', templates),
                  ('/etc/bash_completion.d/',
                   ['config/bash_completion.d/ee_auto.rc']),
                  ('/usr/share/man/man8/', ['docs/ee.8'])],
      setup_requires=[],
      entry_points="""
          [console_scripts]
          ee = ee.cli.main:main
      """,
      namespace_packages=[],
      )
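# Installation sketch (standard setuptools flow; illustrative, not part of
# the file itself — run from the repository root):
#
#     python3 setup.py install    # installs the `ee` console script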
Jurisdesk/freedoms
setup.py
Python
mit
3,124
# -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 ## ## Copyright (C) 2012 Async Open Source <http://www.async.com.br> ## All rights reserved ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., or visit: http://www.gnu.org/. ## ## Author(s): Stoq Team <stoq-devel@async.com.br> ## import contextlib import decimal from datetime import datetime import mock from stoqlib.database.runtime import get_current_branch from stoqlib.domain.sale import Sale from stoqlib.domain.till import Till from stoqlib.gui.dialogs.saledetails import SaleDetailsDialog from stoqlib.gui.editors.paymentseditor import SalePaymentsEditor from stoqlib.gui.editors.tilleditor import CashInEditor from stoqlib.gui.search.personsearch import ClientSearch from stoqlib.gui.search.salesearch import (SaleWithToolbarSearch, SoldItemsByBranchSearch) from stoqlib.gui.search.tillsearch import TillFiscalOperationsSearch from stoqlib.lib.dateutils import localtoday from stoq.gui.till import TillApp from stoq.gui.test.baseguitest import BaseGUITest class TestTill(BaseGUITest): def _check_run_dialog(self, action, dialog): with contextlib.nested( mock.patch('stoq.gui.till.TillApp.run_dialog'), mock.patch.object(self.store, 'commit'), mock.patch.object(self.store, 'close')) as ctx: self.activate(action) run_dialog = ctx[0] self.assertEquals(run_dialog.call_count, 1) args, kwargs = run_dialog.call_args called_dialog, store = args self.assertEquals(called_dialog, dialog) self.assertEquals(store, self.store) def test_initial_with_open_till(self): till = self.create_till() till.opening_date = localtoday() till.status = Till.STATUS_OPEN app = self.create_app(TillApp, u'till') self.check_app(app, u'till-opened-till') def test_initial_with_closed_till(self): app = self.create_app(TillApp, u'till') self.check_app(app, u'till-closed-till') @mock.patch('stoqlib.gui.fiscalprinter.api.new_store') @mock.patch('stoqlib.gui.fiscalprinter.yesno') @mock.patch('stoqlib.gui.fiscalprinter.run_dialog') def test_initial_with_blocked_till(self, yesno, new_store, run_dialog): till = self.create_till() till.opening_date = datetime(2015, 1, 2) till.status = Till.STATUS_OPEN new_store.return_value = self.store yesno.return_value = False app = self.create_app(TillApp, u'till') self.check_app(app, u'till-blocked-till') def test_select(self): sale = self.create_sale(branch=get_current_branch(self.store)) self.add_product(sale) sale.status = Sale.STATUS_CONFIRMED app = self.create_app(TillApp, u'till') app.status_filter.select(Sale.STATUS_CONFIRMED) results = app.results results.select(results[0]) @mock.patch('stoq.gui.till.run_dialog') def test_confirm_order(self, run_dialog): with contextlib.nested( mock.patch('stoqlib.gui.fiscalprinter.FiscalCoupon.confirm'), mock.patch('stoq.gui.till.api.new_store'), mock.patch.object(self.store, 'commit'), mock.patch.object(self.store, 'close')) as ctx: new_store = ctx[1] new_store.return_value = self.store sale = 
self.create_sale(branch=get_current_branch(self.store)) self.add_product(sale) sale.status = Sale.STATUS_ORDERED app = self.create_app(TillApp, u'till') app.status_filter.select(Sale.STATUS_ORDERED) results = app.results results.select(results[0]) self.activate(app.Confirm) confirm = ctx[0] confirm.assert_called_once_with( sale, self.store, subtotal=decimal.Decimal("10.00")) # Confirm a pre sale. wo_sale = self.create_sale(branch=get_current_branch(self.store)) wo_sale.status = Sale.STATUS_QUOTE wo_sale.add_sellable(self.create_sellable()) workorder = self.create_workorder() workorder.sale = wo_sale app.status_filter.select(Sale.STATUS_QUOTE) results.select(results[0]) self.activate(app.Confirm) run_dialog.assert_called_once_with(SalePaymentsEditor, app, self.store, wo_sale) self.assertEquals(sale.status, Sale.STATUS_ORDERED) @mock.patch('stoq.gui.till.api.new_store') def test_run_search_dialogs(self, new_store): new_store.return_value = self.store app = self.create_app(TillApp, u'till') self._check_run_dialog(app.SearchClient, ClientSearch) self._check_run_dialog(app.SearchSale, SaleWithToolbarSearch) self._check_run_dialog(app.SearchSoldItemsByBranch, SoldItemsByBranchSearch) self._check_run_dialog(app.SearchFiscalTillOperations, TillFiscalOperationsSearch) @mock.patch('stoq.gui.till.run_dialog') def test_run_details_dialog(self, run_dialog): sale = self.create_sale(branch=get_current_branch(self.store)) self.add_product(sale) sale.status = Sale.STATUS_ORDERED app = self.create_app(TillApp, u'till') app.status_filter.select(Sale.STATUS_ORDERED) results = app.results results.select(results[0]) self.activate(app.Details) self.assertEquals(run_dialog.call_count, 1) args, kwargs = run_dialog.call_args dialog, _app, store, sale_view = args self.assertEquals(dialog, SaleDetailsDialog) self.assertEquals(_app, app) self.assertTrue(store is not None) self.assertEquals(sale_view, results[0]) @mock.patch('stoq.gui.till.run_dialog') @mock.patch('stoqlib.api.new_store') def test_run_add_cash_dialog(self, new_store, run_dialog): new_store.return_value = self.store app = self.create_app(TillApp, u'till') with mock.patch.object(self.store, 'commit'): with mock.patch.object(self.store, 'close'): app.TillAddCash.set_sensitive(True) self.activate(app.TillAddCash) run_dialog.assert_called_once_with(CashInEditor, app, self.store) @mock.patch('stoq.gui.till.return_sale') @mock.patch('stoqlib.api.new_store') def test_return_sale(self, new_store, return_sale): new_store.return_value = self.store with self.sysparam(ALLOW_CANCEL_CONFIRMED_SALES=True): sale = self.create_sale(branch=get_current_branch(self.store)) self.add_product(sale) sale.status = Sale.STATUS_ORDERED app = self.create_app(TillApp, u'till') app.status_filter.select(Sale.STATUS_ORDERED) results = app.results results.select(results[0]) with mock.patch.object(self.store, 'commit'): with mock.patch.object(self.store, 'close'): self.activate(app.Return) return_sale.assert_called_once_with(app.get_toplevel(), results[0].sale, self.store)
tiagocardosos/stoq
stoq/gui/test/test_till.py
Python
gpl-2.0
8,078
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, api


class ProductProduct(models.Model):
    _inherit = 'product.product'

    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=100):
        res = super().name_search(
            name, args=args, operator=operator, limit=limit)
        if not limit or len(res) < limit:
            # do not search for lots of products that are already displayed
            actual_product_ids = [x[0] for x in res]
            if not args:
                args = []
            # strip a leading space from the scanned name, if any
            # (comparing str to bytes via .encode() would never match on py3)
            if name and name[0] == ' ':
                name = name[1:]
            products = self.env['stock.production.lot'].search([
                ('ean_128', operator, name),
                ('product_id', 'not in', actual_product_ids),
            ], limit=limit).mapped('product_id')
            if products:
                prods = self.search([('id', 'in', products.ids)] + args,
                                    limit=limit)
                res += prods.name_get()
        return res
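# A rough usage sketch (the barcode value below is an illustrative
# placeholder; `product.product` and `ean_128` come from the code above):
#
#     # A name_search for a GS1-128 code that only exists on a production lot
#     # now also returns the lot's owning product:
#     env['product.product'].name_search('0109501101530003', limit=10)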
ingadhoc/stock
stock_ean128/models/product_product.py
Python
agpl-3.0
1,232
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Fast-Fourier Transform ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes as _dtypes from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import tensor_util as _tensor_util from tensorflow.python.ops import array_ops as _array_ops from tensorflow.python.ops import gen_spectral_ops from tensorflow.python.ops import manip_ops from tensorflow.python.ops import math_ops as _math_ops from tensorflow.python.util.tf_export import tf_export def _infer_fft_length_for_rfft(input_tensor, fft_rank): """Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`.""" # A TensorShape for the inner fft_rank dimensions. fft_shape = input_tensor.get_shape()[-fft_rank:] # If any dim is unknown, fall back to tensor-based math. if not fft_shape.is_fully_defined(): return _array_ops.shape(input_tensor)[-fft_rank:] # Otherwise, return a constant. return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32) def _infer_fft_length_for_irfft(input_tensor, fft_rank): """Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`.""" # A TensorShape for the inner fft_rank dimensions. fft_shape = input_tensor.get_shape()[-fft_rank:] # If any dim is unknown, fall back to tensor-based math. if not fft_shape.is_fully_defined(): fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:]) fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1)) return _array_ops.stack(fft_length) # Otherwise, return a constant. fft_length = fft_shape.as_list() if fft_length: fft_length[-1] = max(0, 2 * (fft_length[-1] - 1)) return _ops.convert_to_tensor(fft_length, _dtypes.int32) def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False): """Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims.""" fft_shape = _tensor_util.constant_value_as_shape(fft_length) # Edge case: skip padding empty tensors. if (input_tensor.shape.ndims is not None and any(dim.value == 0 for dim in input_tensor.shape.dims)): return input_tensor # If we know the shapes ahead of time, we can either skip or pre-compute the # appropriate paddings. Otherwise, fall back to computing paddings in # TensorFlow. if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None: # Slice the last FFT-rank dimensions from input_tensor's shape. input_fft_shape = input_tensor.shape[-fft_shape.ndims:] if input_fft_shape.is_fully_defined(): # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1. 
if is_reverse: fft_shape = fft_shape[:-1].concatenate( fft_shape.dims[-1].value // 2 + 1) paddings = [[0, max(fft_dim.value - input_dim.value, 0)] for fft_dim, input_dim in zip( fft_shape.dims, input_fft_shape.dims)] if any(pad > 0 for _, pad in paddings): outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims - fft_shape.ndims), 0) return _array_ops.pad(input_tensor, outer_paddings + paddings) return input_tensor # If we can't determine the paddings ahead of time, then we have to pad. If # the paddings end up as zero, tf.pad has a special-case that does no work. input_rank = _array_ops.rank(input_tensor) input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:] outer_dims = _math_ops.maximum(0, input_rank - fft_rank) outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype) # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1. if is_reverse: fft_length = _array_ops.concat([fft_length[:-1], fft_length[-1:] // 2 + 1], 0) fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape) paddings = _array_ops.concat([outer_paddings, fft_paddings], 0) paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings], axis=1) return _array_ops.pad(input_tensor, paddings) def _rfft_wrapper(fft_fn, fft_rank, default_name): """Wrapper around gen_spectral_ops.rfft* that infers fft_length argument.""" def _rfft(input_tensor, fft_length=None, name=None): """Wrapper around gen_spectral_ops.rfft* that infers fft_length argument.""" with _ops.name_scope(name, default_name, [input_tensor, fft_length]) as name: input_tensor = _ops.convert_to_tensor(input_tensor, preferred_dtype=_dtypes.float32) if input_tensor.dtype not in (_dtypes.float32, _dtypes.float64): raise ValueError( "RFFT requires tf.float32 or tf.float64 inputs, got: %s" % input_tensor) real_dtype = input_tensor.dtype if real_dtype == _dtypes.float32: complex_dtype = _dtypes.complex64 else: assert real_dtype == _dtypes.float64 complex_dtype = _dtypes.complex128 input_tensor.shape.with_rank_at_least(fft_rank) if fft_length is None: fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank) else: fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32) input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length) fft_length_static = _tensor_util.constant_value(fft_length) if fft_length_static is not None: fft_length = fft_length_static return fft_fn(input_tensor, fft_length, Tcomplex=complex_dtype, name=name) _rfft.__doc__ = fft_fn.__doc__ return _rfft def _irfft_wrapper(ifft_fn, fft_rank, default_name): """Wrapper around gen_spectral_ops.irfft* that infers fft_length argument.""" def _irfft(input_tensor, fft_length=None, name=None): """Wrapper irfft* that infers fft_length argument.""" with _ops.name_scope(name, default_name, [input_tensor, fft_length]) as name: input_tensor = _ops.convert_to_tensor(input_tensor, preferred_dtype=_dtypes.complex64) input_tensor.shape.with_rank_at_least(fft_rank) if input_tensor.dtype not in (_dtypes.complex64, _dtypes.complex128): raise ValueError( "IRFFT requires tf.complex64 or tf.complex128 inputs, got: %s" % input_tensor) complex_dtype = input_tensor.dtype real_dtype = complex_dtype.real_dtype if fft_length is None: fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank) else: fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32) input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=True) fft_length_static = _tensor_util.constant_value(fft_length) if fft_length_static is not None: fft_length = 
fft_length_static return ifft_fn(input_tensor, fft_length, Treal=real_dtype, name=name) _irfft.__doc__ = ifft_fn.__doc__ return _irfft # FFT/IFFT 1/2/3D are exported via # third_party/tensorflow/core/api_def/python_api/ fft = gen_spectral_ops.fft ifft = gen_spectral_ops.ifft fft2d = gen_spectral_ops.fft2d ifft2d = gen_spectral_ops.ifft2d fft3d = gen_spectral_ops.fft3d ifft3d = gen_spectral_ops.ifft3d rfft = _rfft_wrapper(gen_spectral_ops.rfft, 1, "rfft") tf_export("signal.rfft", v1=["signal.rfft", "spectral.rfft"])(rfft) irfft = _irfft_wrapper(gen_spectral_ops.irfft, 1, "irfft") tf_export("signal.irfft", v1=["signal.irfft", "spectral.irfft"])(irfft) rfft2d = _rfft_wrapper(gen_spectral_ops.rfft2d, 2, "rfft2d") tf_export("signal.rfft2d", v1=["signal.rfft2d", "spectral.rfft2d"])(rfft2d) irfft2d = _irfft_wrapper(gen_spectral_ops.irfft2d, 2, "irfft2d") tf_export("signal.irfft2d", v1=["signal.irfft2d", "spectral.irfft2d"])(irfft2d) rfft3d = _rfft_wrapper(gen_spectral_ops.rfft3d, 3, "rfft3d") tf_export("signal.rfft3d", v1=["signal.rfft3d", "spectral.rfft3d"])(rfft3d) irfft3d = _irfft_wrapper(gen_spectral_ops.irfft3d, 3, "irfft3d") tf_export("signal.irfft3d", v1=["signal.irfft3d", "spectral.irfft3d"])(irfft3d) def _fft_size_for_grad(grad, rank): return _math_ops.reduce_prod(_array_ops.shape(grad)[-rank:]) @_ops.RegisterGradient("FFT") def _fft_grad(_, grad): size = _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype) return ifft(grad) * size @_ops.RegisterGradient("IFFT") def _ifft_grad(_, grad): rsize = _math_ops.cast( 1. / _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype.real_dtype), grad.dtype) return fft(grad) * rsize @_ops.RegisterGradient("FFT2D") def _fft2d_grad(_, grad): size = _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype) return ifft2d(grad) * size @_ops.RegisterGradient("IFFT2D") def _ifft2d_grad(_, grad): rsize = _math_ops.cast( 1. / _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype.real_dtype), grad.dtype) return fft2d(grad) * rsize @_ops.RegisterGradient("FFT3D") def _fft3d_grad(_, grad): size = _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype) return ifft3d(grad) * size @_ops.RegisterGradient("IFFT3D") def _ifft3d_grad(_, grad): rsize = _math_ops.cast( 1. / _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype.real_dtype), grad.dtype) return fft3d(grad) * rsize def _rfft_grad_helper(rank, irfft_fn): """Returns a gradient function for an RFFT of the provided rank.""" # Can't happen because we don't register a gradient for RFFT3D. assert rank in (1, 2), "Gradient for RFFT3D is not implemented." def _grad(op, grad): """A gradient function for RFFT with the provided `rank` and `irfft_fn`.""" fft_length = op.inputs[1] complex_dtype = grad.dtype real_dtype = complex_dtype.real_dtype input_shape = _array_ops.shape(op.inputs[0]) is_even = _math_ops.cast(1 - (fft_length[-1] % 2), complex_dtype) def _tile_for_broadcasting(matrix, t): expanded = _array_ops.reshape( matrix, _array_ops.concat([ _array_ops.ones([_array_ops.rank(t) - 2], _dtypes.int32), _array_ops.shape(matrix) ], 0)) return _array_ops.tile( expanded, _array_ops.concat([_array_ops.shape(t)[:-2], [1, 1]], 0)) def _mask_matrix(length): """Computes t_n = exp(sqrt(-1) * pi * n^2 / line_len).""" # TODO(rjryan): Speed up computation of twiddle factors using the # following recurrence relation and cache them across invocations of RFFT. # # t_n = exp(sqrt(-1) * pi * n^2 / line_len) # for n = 0, 1,..., line_len-1. 
# For n > 2, use t_n = t_{n-1}^2 / t_{n-2} * t_1^2 a = _array_ops.tile( _array_ops.expand_dims(_math_ops.range(length), 0), (length, 1)) b = _array_ops.transpose(a, [1, 0]) return _math_ops.exp( -2j * np.pi * _math_ops.cast(a * b, complex_dtype) / _math_ops.cast(length, complex_dtype)) def _ymask(length): """A sequence of [1+0j, -1+0j, 1+0j, -1+0j, ...] with length `length`.""" return _math_ops.cast(1 - 2 * (_math_ops.range(length) % 2), complex_dtype) y0 = grad[..., 0:1] if rank == 1: ym = grad[..., -1:] extra_terms = y0 + is_even * ym * _ymask(input_shape[-1]) elif rank == 2: # Create a mask matrix for y0 and ym. base_mask = _mask_matrix(input_shape[-2]) # Tile base_mask to match y0 in shape so that we can batch-matmul the # inner 2 dimensions. tiled_mask = _tile_for_broadcasting(base_mask, y0) y0_term = _math_ops.matmul(tiled_mask, _math_ops.conj(y0)) extra_terms = y0_term ym = grad[..., -1:] ym_term = _math_ops.matmul(tiled_mask, _math_ops.conj(ym)) inner_dim = input_shape[-1] ym_term = _array_ops.tile( ym_term, _array_ops.concat([ _array_ops.ones([_array_ops.rank(grad) - 1], _dtypes.int32), [inner_dim] ], 0)) * _ymask(inner_dim) extra_terms += is_even * ym_term # The gradient of RFFT is the IRFFT of the incoming gradient times a scaling # factor, plus some additional terms to make up for the components dropped # due to Hermitian symmetry. input_size = _math_ops.cast( _fft_size_for_grad(op.inputs[0], rank), real_dtype) the_irfft = irfft_fn(grad, fft_length) return 0.5 * (the_irfft * input_size + _math_ops.real(extra_terms)), None return _grad def _irfft_grad_helper(rank, rfft_fn): """Returns a gradient function for an IRFFT of the provided rank.""" # Can't happen because we don't register a gradient for IRFFT3D. assert rank in (1, 2), "Gradient for IRFFT3D is not implemented." def _grad(op, grad): """A gradient function for IRFFT with the provided `rank` and `rfft_fn`.""" # Generate a simple mask like [1.0, 2.0, ..., 2.0, 1.0] for even-length FFTs # and [1.0, 2.0, ..., 2.0] for odd-length FFTs. To reduce extra ops in the # graph we special-case the situation where the FFT length and last # dimension of the input are known at graph construction time. fft_length = op.inputs[1] fft_length_static = _tensor_util.constant_value(fft_length) if fft_length_static is not None: fft_length = fft_length_static real_dtype = grad.dtype if real_dtype == _dtypes.float32: complex_dtype = _dtypes.complex64 elif real_dtype == _dtypes.float64: complex_dtype = _dtypes.complex128 is_odd = _math_ops.mod(fft_length[-1], 2) input_last_dimension = _array_ops.shape(op.inputs[0])[-1] mask = _array_ops.concat( [[1.0], 2.0 * _array_ops.ones( [input_last_dimension - 2 + is_odd], real_dtype), _array_ops.ones([1 - is_odd], real_dtype)], 0) rsize = _math_ops.reciprocal(_math_ops.cast( _fft_size_for_grad(grad, rank), real_dtype)) # The gradient of IRFFT is the RFFT of the incoming gradient times a scaling # factor and a mask. The mask scales the gradient for the Hermitian # symmetric components of the RFFT by a factor of two, since these # components are de-duplicated in the RFFT. the_rfft = rfft_fn(grad, fft_length) return the_rfft * _math_ops.cast(rsize * mask, complex_dtype), None return _grad @tf_export("signal.fftshift") def fftshift(x, axes=None, name=None): """Shift the zero-frequency component to the center of the spectrum. This function swaps half-spaces for all axes listed (defaults to all). Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. 
@compatibility(numpy) Equivalent to numpy.fft.fftshift. https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fftshift.html @end_compatibility For example: ```python x = tf.signal.fftshift([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.]) x.numpy() # array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) ``` Args: x: `Tensor`, input tensor. axes: `int` or shape `tuple`, optional Axes over which to shift. Default is None, which shifts all axes. name: An optional name for the operation. Returns: A `Tensor`, The shifted tensor. """ with _ops.name_scope(name, "fftshift") as name: x = _ops.convert_to_tensor(x) if axes is None: axes = tuple(range(x.shape.ndims)) shift = _array_ops.shape(x) // 2 elif isinstance(axes, int): shift = _array_ops.shape(x)[axes] // 2 else: rank = _array_ops.rank(x) # allows negative axis axes = _array_ops.where(_math_ops.less(axes, 0), axes + rank, axes) shift = _array_ops.gather(_array_ops.shape(x), axes) // 2 return manip_ops.roll(x, shift, axes, name) @tf_export("signal.ifftshift") def ifftshift(x, axes=None, name=None): """The inverse of fftshift. Although identical for even-length x, the functions differ by one sample for odd-length x. @compatibility(numpy) Equivalent to numpy.fft.ifftshift. https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifftshift.html @end_compatibility For example: ```python x = tf.signal.ifftshift([[ 0., 1., 2.],[ 3., 4., -4.],[-3., -2., -1.]]) x.numpy() # array([[ 4., -4., 3.],[-2., -1., -3.],[ 1., 2., 0.]]) ``` Args: x: `Tensor`, input tensor. axes: `int` or shape `tuple` Axes over which to calculate. Defaults to None, which shifts all axes. name: An optional name for the operation. Returns: A `Tensor`, The shifted tensor. """ with _ops.name_scope(name, "ifftshift") as name: x = _ops.convert_to_tensor(x) if axes is None: axes = tuple(range(x.shape.ndims)) shift = -(_array_ops.shape(x) // 2) elif isinstance(axes, int): shift = -(_array_ops.shape(x)[axes] // 2) else: rank = _array_ops.rank(x) # allows negative axis axes = _array_ops.where(_math_ops.less(axes, 0), axes + rank, axes) shift = -(_array_ops.gather(_array_ops.shape(x), axes) // 2) return manip_ops.roll(x, shift, axes, name) _ops.RegisterGradient("RFFT")(_rfft_grad_helper(1, irfft)) _ops.RegisterGradient("IRFFT")(_irfft_grad_helper(1, rfft)) _ops.RegisterGradient("RFFT2D")(_rfft_grad_helper(2, irfft2d)) _ops.RegisterGradient("IRFFT2D")(_irfft_grad_helper(2, rfft2d))
gunan/tensorflow
tensorflow/python/ops/signal/fft_ops.py
Python
apache-2.0
18,047
from unittest import TestCase from mediawords.languages.zh import ChineseLanguage # noinspection SpellCheckingInspection class TestChineseLanguage(TestCase): def setUp(self): self.__tokenizer = ChineseLanguage() def test_language_code(self): assert self.__tokenizer.language_code() == "zh" def test_sample_sentence(self): assert len(self.__tokenizer.sample_sentence()) def test_stop_words_map(self): stop_words = self.__tokenizer.stop_words_map() assert "不起" in stop_words assert "not_a_stopword" not in stop_words def test_stem(self): assert self.__tokenizer.stem_words(['abc']) == ['abc'] def test_split_text_to_sentences(self): # noinspection PyTypeChecker assert self.__tokenizer.split_text_to_sentences(None) == [] assert self.__tokenizer.split_text_to_sentences("") == [] assert self.__tokenizer.split_text_to_sentences(" ") == [] assert self.__tokenizer.split_text_to_sentences(".") == ["."] # English-only punctuation sentences = self.__tokenizer.split_text_to_sentences( "Hello. How do you do? I'm doing okay." ) assert sentences == [ "Hello.", "How do you do?", "I'm doing okay.", ] # English-only punctuation, no period at the end of sentence sentences = self.__tokenizer.split_text_to_sentences( "Hello. How do you do? I'm doing okay" ) assert sentences == [ "Hello.", "How do you do?", "I'm doing okay", ] # Chinese-only punctuation sentences = self.__tokenizer.split_text_to_sentences( "問責制既不能吸引政治人才加入政府。" "時任政務司長林鄭月娥被指在未有公開諮詢下,突然宣布西九文化區興建故宮博物館,並委聘建築師嚴迅奇擔任設計顧問,被立法會議員向廉政公署舉報。" "她一出生就被父母遺棄,住在八里愛心教養院。" "堆填區個綠色真係靚,心曠神怡。" ) assert sentences == [ "問責制既不能吸引政治人才加入政府。", "時任政務司長林鄭月娥被指在未有公開諮詢下,突然宣布西九文化區興建故宮博物館,並委聘建築師嚴迅奇擔任設計顧問,被立法會議員向廉政公署舉報。", "她一出生就被父母遺棄,住在八里愛心教養院。", "堆填區個綠色真係靚,心曠神怡。", ] sentences = self.__tokenizer.split_text_to_sentences(""" 范妮·伊姆利,是英国女权主义者玛丽·沃斯通克拉夫特与美国商人吉尔伯特·伊姆利的私生女。 在范妮出生不久,伊姆利便将沃斯通克拉夫特抛弃在了法国大革命日趋混乱的局势之中。 在经历了这次失意的爱情后,沃斯通克拉夫特与哲学家戈德温建立了亲密的关系,并最终与他结婚。 1797年,沃斯通克拉夫特死于产后并发症,将三岁的范妮与新生的玛丽·沃斯通克拉夫特·戈德温留给了戈德温一人抚育。 四年后,戈德温与第二任妻子结婚,范妮姐妹俩都不喜欢新的戈德温太太。 1814年,年少的玛丽与新戈德温太太带来的女儿克莱尔·克莱尔蒙特一同离家出走,并与浪漫主义诗人雪莱前往了欧洲大陆。 独自留下的范妮于1816年服毒自杀,时年22岁。 """) assert sentences == [ '范妮·伊姆利,是英国女权主义者玛丽·沃斯通克拉夫特与美国商人吉尔伯特·伊姆利的私生女。', '在范妮出生不久,伊姆利便将沃斯通克拉夫特抛弃在了法国大革命日趋混乱的局势之中。', '在经历了这次失意的爱情后,沃斯通克拉夫特与哲学家戈德温建立了亲密的关系,并最终与他结婚。', '1797年,沃斯通克拉夫特死于产后并发症,将三岁的范妮与新生的玛丽·沃斯通克拉夫特·戈德温留给了戈德温一人抚育。', '四年后,戈德温与第二任妻子结婚,范妮姐妹俩都不喜欢新的戈德温太太。', '1814年,年少的玛丽与新戈德温太太带来的女儿克莱尔·克莱尔蒙特一同离家出走,并与浪漫主义诗人雪莱前往了欧洲大陆。', '独自留下的范妮于1816年服毒自杀,时年22岁。', ] # Chinese-only punctuation, no EOS at the end of the sentence sentences = self.__tokenizer.split_text_to_sentences( "問責制既不能吸引政治人才加入政府。" "時任政務司長林鄭月娥被指在未有公開諮詢下,突然宣布西九文化區興建故宮博物館,並委聘建築師嚴迅奇擔任設計顧問,被立法會議員向廉政公署舉報。" "她一出生就被父母遺棄,住在八里愛心教養院。" "堆填區個綠色真係靚,心曠神怡" ) assert sentences == [ "問責制既不能吸引政治人才加入政府。", "時任政務司長林鄭月娥被指在未有公開諮詢下,突然宣布西九文化區興建故宮博物館,並委聘建築師嚴迅奇擔任設計顧問,被立法會議員向廉政公署舉報。", "她一出生就被父母遺棄,住在八里愛心教養院。", "堆填區個綠色真係靚,心曠神怡", ] # Chinese and English punctuation sentences = self.__tokenizer.split_text_to_sentences( "問責制既不能吸引政治人才加入政府。" "This is some English text out of the blue. " "時任政務司長林鄭月娥被指在未有公開諮詢下,突然宣布西九文化區興建故宮博物館,並委聘建築師嚴迅奇擔任設計顧問,被立法會議員向廉政公署舉報。" "This is some more English text." ) assert sentences == [ "問責制既不能吸引政治人才加入政府。", "This is some English text out of the blue.", "時任政務司長林鄭月娥被指在未有公開諮詢下,突然宣布西九文化區興建故宮博物館,並委聘建築師嚴迅奇擔任設計顧問,被立法會議員向廉政公署舉報。", "This is some more English text.", ] # Chinese and English punctuation (with newlines) sentences = self.__tokenizer.split_text_to_sentences("""問責制既不能吸引政治人才加入政府。 This is some English text out of the blue. 時任政務司長林鄭月娥被指在未有公開諮詢下,突然宣布西九文化區興建故宮博物館,並委聘建築師嚴迅奇擔任設計顧問,被立法會議員向廉政公署舉報。 This is some more English text. 
This is some more English text. Dsds. """) assert sentences == [ "問責制既不能吸引政治人才加入政府。", "This is some English text out of the blue.", "時任政務司長林鄭月娥被指在未有公開諮詢下,突然宣布西九文化區興建故宮博物館,並委聘建築師嚴迅奇擔任設計顧問,被立法會議員向廉政公署舉報。", "This is some more English text.", "This is some more English text.", "Dsds.", ] # Chinese and English sentences separates by double-newlines # (test has extra whitespace between line breaks) sentences = self.__tokenizer.split_text_to_sentences(""" 問責制既不能吸引政治人才加入政府 This is some English text out of the blue 香港的主要官員問責制(下稱「問責制」)於2002年由當時的特首董建華提出 This is some more English text """) assert sentences == [ "問責制既不能吸引政治人才加入政府", "This is some English text out of the blue", "香港的主要官員問責制(下稱「問責制」)於2002年由當時的特首董建華提出", "This is some more English text", ] # Chinese and English sentences in a list # (test has extra whitespace between line breaks) sentences = self.__tokenizer.split_text_to_sentences(""" 問責制既不能吸引政治人才加入政府 * This is some English text out of the blue. Some more English text. * 本文會從幾方面討論問責制的成效和影響。首先是行政領導和問責制的制度設計問題。 This is some more English text """) assert sentences == [ "問責制既不能吸引政治人才加入政府", "* This is some English text out of the blue.", "Some more English text.", "* 本文會從幾方面討論問責制的成效和影響。", "首先是行政領導和問責制的制度設計問題。", "This is some more English text", ] def test_split_sentence_to_words(self): # noinspection PyTypeChecker assert self.__tokenizer.split_sentence_to_words(None) == [] assert self.__tokenizer.split_sentence_to_words("") == [] assert self.__tokenizer.split_sentence_to_words(" ") == [] assert self.__tokenizer.split_sentence_to_words(".") == [] # English sentence words = self.__tokenizer.split_sentence_to_words("How do you do?") assert words == [ "How", "do", "you", "do", ] # English sentence, no period at the end of the sentence words = self.__tokenizer.split_sentence_to_words("How do you do") assert words == [ "How", "do", "you", "do", ] # English sentence, literal string "EOS" words = self.__tokenizer.split_sentence_to_words("EOS this, EOS that.") assert words == [ "EOS", "this", "EOS", "that", ] # English sentence; tab, newline and comma characters words = self.__tokenizer.split_sentence_to_words( "Something\tSomething else\nSomething, completely, different." ) assert words == [ "Something", "Something", "else", "Something", "completely", "different", ] # Chinese sentence words = self.__tokenizer.split_sentence_to_words( "時任政務司長林鄭月娥被指在未有公開諮詢下,突然宣布西九文化區興建故宮博物館,並委聘建築師嚴迅奇擔任設計顧問,被立法會議員向廉政公署舉報。" ) assert words == [ '時任', '政務司長', '林鄭月娥', '被', '指', '在', '未有', '公開', '諮詢', '下', '突然', '宣布', '西九文化區', '興建', '故宮博物館', '並', '委聘', '建築師', '嚴迅奇', '擔任', '設計顧問', '被', '立法會', '議員', '向', '廉政公署', '舉報', ] # Tokenize names of top political figures or celebrities words = self.__tokenizer.split_sentence_to_words( "習近平王毅黃毓民汤家骅" ) assert words == [ "習近平", "王毅", "黃毓民", "汤家骅", ] # Chinese + English sentence words = self.__tokenizer.split_sentence_to_words("他建議想學好英文,必須人格分裂、要代入外國人的思想(mindset)。") assert words == [ "他", "建議", "想", "學好", "英文", "必須", "人格分裂", "要", "代入", "外國人", "的", "思想", "mindset", ] # Chinese punctuation words = self.__tokenizer.split_sentence_to_words( "Badger、badger。Badger・Badger『badger』「Badger」badger?Badger!Badger!?" "Badger【badger】Badger~badger(badger)《Badger》,badger;badger……badger:badger" ) assert words == [ 'Badger', 'badger', 'Badger', 'Badger', 'badger', 'Badger', 'badger', 'Badger', 'Badger', 'Badger', 'badger', 'Badger', 'badger', 'badger', 'Badger', 'badger', 'badger', 'badger', 'badger', ]
berkmancenter/mediacloud
apps/common/tests/python/mediawords/languages/test_zh.py
Python
agpl-3.0
13,029
# Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

from cinder import interface
from cinder.volume.drivers.dothill import dothill_iscsi
from cinder.volume.drivers.san.hp import hpmsa_common


@interface.volumedriver
class HPMSAISCSIDriver(dothill_iscsi.DotHillISCSIDriver):
    """OpenStack iSCSI cinder drivers for HPMSA arrays.

    Version history:
        1.0 - Inheriting from DotHill cinder drivers.
    """

    VERSION = "1.0"

    def __init__(self, *args, **kwargs):
        super(HPMSAISCSIDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(hpmsa_common.common_opts)
        self.configuration.append_config_values(hpmsa_common.iscsi_opts)
        self.iscsi_ips = self.configuration.hpmsa_iscsi_ips

    def _init_common(self):
        return hpmsa_common.HPMSACommon(self.configuration)
bswartz/cinder
cinder/volume/drivers/san/hp/hpmsa_iscsi.py
Python
apache-2.0
1,438
from ..utils import *


# Injured Blademaster
class CS2_181:
    play = Hit(SELF, 4)


# Young Priestess
class EX1_004:
    events = OWN_TURN_END.on(Buff(RANDOM_OTHER_FRIENDLY_MINION, "EX1_004e"))


# Alarm-o-Bot
class EX1_006:
    events = OWN_TURN_BEGIN.on(Swap(SELF, RANDOM(CONTROLLER_HAND + MINION)))


# Angry Chicken
class EX1_009:
    enrage = Refresh(SELF, {GameTag.ATK: +5})


# Twilight Drake
class EX1_043:
    play = Buff(SELF, "EX1_043e") * Count(CONTROLLER_HAND)


# Questing Adventurer
class EX1_044:
    events = OWN_CARD_PLAY.on(Buff(SELF, "EX1_044e"))


# Coldlight Oracle
class EX1_050:
    play = Draw(ALL_PLAYERS) * 2


# Mana Addict
class EX1_055:
    events = OWN_SPELL_PLAY.on(Buff(SELF, "EX1_055o"))


# Sunfury Protector
class EX1_058:
    play = Taunt(SELF_ADJACENT)


# Crazed Alchemist
class EX1_059:
    play = SwapAttackAndHealth(TARGET, "EX1_059e")


# Pint-Sized Summoner
class EX1_076:
    update = (
        (Attr(CONTROLLER, GameTag.NUM_MINIONS_PLAYED_THIS_TURN) == 0) &
        Refresh(FRIENDLY + MINION + IN_HAND, {GameTag.COST: -1})
    )


# Secretkeeper
class EX1_080:
    events = OWN_SECRET_PLAY.on(Buff(SELF, "EX1_080o"))


# Mind Control Tech
class EX1_085:
    play = Find(ENEMY_MINIONS, 4) & Steal(RANDOM_ENEMY_MINION)


# Arcane Golem
class EX1_089:
    play = GainMana(OPPONENT, 1)


# Defender of Argus
class EX1_093:
    play = Buff(SELF_ADJACENT, "EX1_093e")


# Gadgetzan Auctioneer
class EX1_095:
    events = OWN_SPELL_PLAY.on(Draw(CONTROLLER))


# Abomination
class EX1_097:
    deathrattle = Hit(ALL_CHARACTERS, 2)


# Coldlight Seer
class EX1_103:
    play = Buff(ALL_MINIONS + MURLOC - SELF, "EX1_103e")


# Azure Drake
class EX1_284:
    play = Draw(CONTROLLER)


# Murloc Tidecaller
class EX1_509:
    events = Summon(ALL_PLAYERS, MURLOC).on(Buff(SELF, "EX1_509e"))


# Ancient Mage
class EX1_584:
    play = Buff(SELF_ADJACENT, "EX1_584e")


# Imp Master
class EX1_597:
    events = OWN_TURN_END.on(Hit(SELF, 1), Summon(CONTROLLER, "EX1_598"))


# Mana Wraith
class EX1_616:
    update = Refresh(MINION + IN_HAND, {GameTag.COST: +1})


# Knife Juggler
class NEW1_019:
    events = Summon(CONTROLLER, MINION - SELF).after(Hit(RANDOM_ENEMY_CHARACTER, 1))


# Wild Pyromancer
class NEW1_020:
    events = OWN_SPELL_PLAY.after(Hit(ALL_MINIONS, 1))


# Bloodsail Corsair
class NEW1_025:
    play = Hit(ENEMY_WEAPON, 1)


# Violet Teacher
class NEW1_026:
    events = OWN_SPELL_PLAY.on(Summon(CONTROLLER, "NEW1_026t"))


# Master Swordsmith
class NEW1_037:
    events = OWN_TURN_END.on(Buff(RANDOM_OTHER_FRIENDLY_MINION, "NEW1_037e"))


# Stampeding Kodo
class NEW1_041:
    play = Destroy(RANDOM(ENEMY_MINIONS + (ATK <= 2)))
liujimj/fireplace
fireplace/cards/classic/neutral_rare.py
Python
agpl-3.0
2,580
import pygame
import numpy as np
import time


class Robot(object):

    def __init__(self, x, y, angle, width, height, filename):
        self.x = x
        self.y = y
        self.angle = angle
        self._v = 0.0   # forward velocity
        self._r = 0.0   # angular velocity
        self.set_image(filename)
        self._width = width
        self._height = height
        self.scale = 1.0
        self._color = (0, 0, 0)
        self._alpha = 255

    @property
    def state(self):
        return np.array([self.x, self.y, self.angle])

    @property
    def pos(self):
        return np.array([self.x, self.y])

    @property
    def width(self):
        return int(self.scale * self._width + 0.5)

    @property
    def height(self):
        return int(self.scale * self._height + 0.5)

    @property
    def size(self):
        return np.array([self.width, self.height])

    @property
    def color(self):
        return self._color + (self._alpha,)

    def set_color(self, r, g, b, a=None):
        self._color = (r, g, b)
        if a is not None:
            self.set_alpha(a)

    def set_alpha(self, alpha):
        self._alpha = alpha

    def set_image(self, filename):
        self._image = pygame.image.load(filename).convert()

    @property
    def image(self):
        # Rotate the sprite around its centre, key out the background colour
        # and scale it to the current size.
        image = pygame.Surface(self._image.get_size(), pygame.SRCALPHA)
        center1 = np.array(self._image.get_rect().center)
        rot = pygame.transform.rotate(self._image, self.angle)
        center2 = np.array(rot.get_rect().center)
        rot.set_colorkey(rot.get_at((0, 0)))
        image.blit(rot, (center1 - center2))
        image.fill(self.color, special_flags=pygame.BLEND_RGBA_MULT)
        return pygame.transform.smoothscale(image, self.size)

    def draw(self, image, pos=None):
        if pos is None:
            pos = np.mod(self.pos, image.get_size())
        pos -= self.size / 2
        image.blit(self.image, pos)

    def accelerate(self, vx, dt):
        self._v += vx * dt

    def rotate(self, vr, dt):
        self._r += vr * dt

    def update(self, dt):
        # Integrate the unicycle-style motion model, then apply simple
        # linear and angular friction.
        self.x += np.sin(self.angle / 180.0 * np.pi) * self._v * dt
        self.y += np.cos(self.angle / 180.0 * np.pi) * self._v * dt
        self.angle += self._r * dt
        self._v -= (1.0 * self._v) * dt
        self._r -= (2.5 * self._r) * dt
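# A minimal usage sketch (assumes a pygame display is initialised and a
# sprite file "robot.png" exists; both are illustrative, not part of the
# original module):
#
#     screen = pygame.display.set_mode((640, 480))
#     robot = Robot(320, 240, angle=0.0, width=32, height=32,
#                   filename="robot.png")
#     robot.accelerate(20.0, dt=0.1)   # speed up
#     robot.rotate(45.0, dt=0.1)       # start turning
#     robot.update(dt=0.1)             # integrate the motion model
#     robot.draw(screen)
#     pygame.display.flip()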
Bjarne-AAU/MonteCarloLocalization
Robot.py
Python
gpl-3.0
2,250
# This file is part of Libreosteo.
#
# Libreosteo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libreosteo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Libreosteo. If not, see <http://www.gnu.org/licenses/>.
littlejo/Libreosteo
libreosteoweb/api/events/__init__.py
Python
gpl-3.0
667
#!/usr/bin/python
from os import walk
import re, time, datetime, ConfigParser, sys, os, subprocess, gzip


def print_usage(script):
    print 'Usage:', script, '--config <config file>', '--dir <target backup directory>', '--pf <mysql password file>'
    sys.exit(1)


def check_location(file, desc):
    expandfile = os.path.expanduser(file)
    if not os.path.exists(expandfile):
        print 'Error:', desc, file, 'was not found'
        sys.exit(1)
    else:
        return expandfile


# read configuration file and input parameters
def init_config(args):
    config = {
        'MAIN.BackupDir': '',
        'MAIN.MySqlUserFile': '',
        'PURGE.DaysToKeep': '',
        'BACKUP.DirsToBackup': ''
    }

    # check if config file was provided and assign it to variable
    if not '--config' in args and not os.access(os.path.expanduser('~/sitebackup/etc/mysqlbkp.cfg'), os.R_OK):
        print 'Error: Configuration file was not found'
        print_usage(args[0])
    else:
        configfile = ConfigParser.SafeConfigParser()
        if not '--config' in args:
            configfile.read(os.path.expanduser('~/sitebackup/etc/mysqlbkp.cfg'))
            config['MAIN.MySqlUserFile'] = check_location(os.path.expanduser('~/sitebackup/etc/mysqlbkp.cfg'), 'Configuration file')
        else:
            configfile.read(check_location(args[args.index('--config') + 1], 'Configuration file'))
            config['MAIN.MySqlUserFile'] = check_location(args[args.index('--config') + 1], 'Configuration file')
            args.pop(args.index('--config') + 1)
            args.pop(args.index('--config'))

    # parsing of command-line parameters
    if '--dir' in args:
        config['MAIN.BackupDir'] = check_location(args[args.index('--dir') + 1], 'Backup directory')
        args.pop(args.index('--dir') + 1)
        args.pop(args.index('--dir'))
    else:
        config['MAIN.BackupDir'] = check_location(configfile.get('MAIN', 'BackupDir'), 'Backup directory')

    if len(args) > 1:
        print args
        print_usage(args[0])

    # populate configuration structure from config file
    config['PURGE.DaysToKeep'] = configfile.get('PURGE', 'DaysToKeep')
    config['BACKUP.DirsToBackup'] = configfile.get('BACKUP', 'DirsToBackup')
    return config


# create list of databases which are available for provided mysql user,
# this user has to have SELECT and LOCK TABLE for this database
def mysql_dblist(cnf):
    no_backup = ['Database', 'information_schema', 'performance_schema', 'test']
    cmd = ['mysql', '--defaults-extra-file=' + cnf, '-e', 'show databases']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode > 0:
        print 'MySQL Error:'
        print stderr
        sys.exit(1)
    dblist = stdout.strip().split('\n')
    for item in no_backup:
        try:
            dblist.remove(item)
        except ValueError:
            continue
    if len(dblist) == 0:
        print "Doesn't appear to be any databases found"
        sys.exit(1)
    return dblist


# backup of databases which are available for provided mysql user,
# this user has to have SELECT and LOCK TABLE for this database
def mysql_backup(config, dblist):
    bresults = {}
    ufile = os.path.expanduser(config['MAIN.MySqlUserFile'])
    for db in dblist:
        bfile = os.path.join(os.path.expanduser(config['MAIN.BackupDir']),
                             db + '_' + datetime.datetime.now().strftime('%Y%m%d%H%M') + '.sql')
        if db == 'mysql':
            cmd = ['mysqldump', '--defaults-extra-file=' + ufile, '--max_allowed_packet=512M', '--events', db]
        else:
            cmd = ['mysqldump', '--defaults-extra-file=' + ufile, '--max_allowed_packet=512M', '--skip-extended-insert', '--quick', '--single-transaction', db]
        # 3 attempts to backup database, sometimes backup can fail without any
        # good reason (server watchdog), in this case we would like to try it again
        i = 1
        while i != 3:
            try:
                p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                outp = p.communicate()[0]
                gz = gzip.open(bfile + '.gz', 'wb')
                gz.write(outp)
                gz.close()
                retcode = p.wait()
            except:
                retcode = 255
            # re-point symlink to the latest database backup
            if os.access(os.path.join(os.path.expanduser(config['MAIN.BackupDir']), 'current-' + db + '.sql.gz'), os.F_OK):
                os.unlink(os.path.join(os.path.expanduser(config['MAIN.BackupDir']), 'current-' + db + '.sql.gz'))
            os.symlink(bfile + '.gz', os.path.join(os.path.expanduser(config['MAIN.BackupDir']), 'current-' + db + '.sql.gz'))
            # backup was not successful - remove partially completed file and inc counter
            if retcode > 0:
                i += 1
                bresults[db] = 'backup error'
                if os.path.exists(bfile):
                    os.remove(bfile)
            else:
                # backup completed successfully - we have to reset counter for
                # the next one if there are any and break 'while' cycle
                i = 3
                bresults[db] = 'backup ok'
                break


# prepare list of files for backup, which will be downloaded over sftp,
# and list of directories, which will be created on destination host
def fs_backup(config):
    flist = os.path.join(os.path.expanduser(config['MAIN.BackupDir']), 'filelist')
    dlist = os.path.join(os.path.expanduser(config['MAIN.BackupDir']), 'dirlist')
    f = open(flist, 'w')
    d = open(dlist, 'w')
    for (dirpath, dirnames, filenames) in walk(os.path.expanduser(config['BACKUP.DirsToBackup'])):
        if filenames:
            d.write(dirpath + '\n')
            for fn in filenames:
                fpath = os.path.join(dirpath, fn)
                try:
                    f.write(fpath + '|' + str(os.path.getsize(fpath)) + '|' + str(os.path.getmtime(fpath)) + '\n')
                except:
                    print 'Could not get info for', fpath
    f.close()
    d.close()


# purging old database backups according to settings in configuration file
def purge(config):
    print "Removing database backup older than", config['PURGE.DaysToKeep'], "days"
    os.chdir(os.path.abspath(config['MAIN.BackupDir']))
    pb = [d for d in os.listdir('.')
          if os.path.isfile(d) and os.path.getmtime(os.path.abspath(d)) < time.time() - 24 * 60 * 60 * int(config['PURGE.DaysToKeep'])]
    for d in pb:
        # only remove gzipped backup files (the original compared the match
        # object against the string 'None', which was always true)
        if re.search('gz$', d) is not None:
            print "  Removing", d, "..."
            os.remove(os.path.abspath(d))


def main():
    config = init_config(sys.argv)
    dblist = mysql_dblist(config['MAIN.MySqlUserFile'])
    mysql_backup(config, dblist)
    fs_backup(config)
    purge(config)


if __name__ == '__main__':
    main()
eugenebobkov/sitebackup
bin/mysqlbkp2.py
Python
gpl-3.0
6,653
# Project imports
import os
import sys

import hashlib
import random
import re
import shutil
import string
import tempfile
import time

sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))))
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))

import helper
from elodie.media.base import Base, get_all_subclasses
from elodie.media.media import Media
from elodie.media.audio import Audio
from elodie.media.text import Text
from elodie.media.photo import Photo
from elodie.media.video import Video

os.environ['TZ'] = 'GMT'

setup_module = helper.setup_module
teardown_module = helper.teardown_module


def test_get_all_subclasses():
    subclasses = get_all_subclasses(Base)
    expected = {Media, Base, Text, Photo, Video, Audio}
    assert subclasses == expected, subclasses


def test_get_class_by_file_without_extension():
    base_file = helper.get_file('withoutextension')
    cls = Base.get_class_by_file(base_file, [Audio, Text, Photo, Video])
    assert cls is None, cls


def test_get_original_name():
    temporary_folder, folder = helper.create_working_folder()
    origin = '%s/%s' % (folder, 'with-original-name.jpg')
    file = helper.get_file('with-original-name.jpg')
    shutil.copyfile(file, origin)
    media = Media.get_class_by_file(origin, [Photo])
    original_name = media.get_original_name()
    assert original_name == 'originalfilename.jpg', original_name


def test_get_original_name_invalid_file():
    temporary_folder, folder = helper.create_working_folder()
    origin = '%s/%s' % (folder, 'invalid.jpg')
    file = helper.get_file('invalid.jpg')
    shutil.copyfile(file, origin)
    media = Media.get_class_by_file(origin, [Photo])
    original_name = media.get_original_name()
    assert original_name is None, original_name


def test_set_album_from_folder_invalid_file():
    temporary_folder, folder = helper.create_working_folder()
    base_file = helper.get_file('invalid.jpg')
    origin = '%s/invalid.jpg' % folder
    shutil.copyfile(base_file, origin)
    base = Base(origin)
    status = base.set_album_from_folder()
    assert status == False, status


def test_set_album_from_folder():
    temporary_folder, folder = helper.create_working_folder()
    origin = '%s/photo.jpg' % folder
    shutil.copyfile(helper.get_file('plain.jpg'), origin)
    photo = Photo(origin)
    metadata = photo.get_metadata()
    assert metadata['album'] is None, metadata['album']
    new_album_name = os.path.split(folder)[1]
    status = photo.set_album_from_folder()
    assert status == True, status
    photo_new = Photo(origin)
    metadata_new = photo_new.get_metadata()
    shutil.rmtree(folder)
    assert metadata_new['album'] == new_album_name, metadata_new['album']


def test_set_metadata():
    temporary_folder, folder = helper.create_working_folder()
    origin = '%s/photo.jpg' % folder
    shutil.copyfile(helper.get_file('plain.jpg'), origin)
    photo = Photo(origin)
    metadata = photo.get_metadata()
    assert metadata['title'] is None, metadata['title']
    new_title = 'Some Title'
    photo.set_metadata(title=new_title)
    new_metadata = photo.get_metadata()
    assert new_metadata['title'] == new_title, new_metadata['title']


def test_set_metadata_basename():
    temporary_folder, folder = helper.create_working_folder()
    origin = '%s/photo.jpg' % folder
    shutil.copyfile(helper.get_file('plain.jpg'), origin)
    photo = Photo(origin)
    metadata = photo.get_metadata()
    assert metadata['base_name'] == 'photo', metadata['base_name']
    new_basename = 'Some Base Name'
    photo.set_metadata_basename(new_basename)
    new_metadata = photo.get_metadata()
    assert new_metadata['base_name'] == new_basename, new_metadata['base_name']
jmathai/elodie
elodie/tests/media/base_test.py
Python
apache-2.0
3,875
# -*- coding: utf-8 -*-
# Generated by Django 1.9c1 on 2015-11-19 13:23
from __future__ import unicode_literals

import datetime

from django.db import migrations, models
import django.utils.crypto
import functools


class Migration(migrations.Migration):

    dependencies = [
        ('reminders_messages', '0004_auto_20151117_1820'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='message',
            options={'get_latest_by': 'created', 'ordering': ('created',)},
        ),
        migrations.AddField(
            model_name='message',
            name='state_updated',
            field=models.DateTimeField(default=datetime.datetime.utcnow),
        ),
        migrations.AlterField(
            model_name='message',
            name='ident',
            field=models.CharField(default=functools.partial(django.utils.crypto.get_random_string, *(40,), **{}), max_length=40, unique=True),
        ),
    ]
takeyourmeds/takeyourmeds-web
takeyourmeds/reminders/reminders_messages/migrations/0005_auto_20151119_1323.py
Python
mit
949
#!/usr/bin/env python3

from jsonschema import validate
import json
import sys

schema = json.load(open(sys.argv[1]))
manifest = json.load(open(sys.argv[2]))

validate(instance=manifest, schema=schema)
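# Invocation sketch (file names are placeholders):
#
#     ./validatemanifest.py schema.json manifest.json
#
# jsonschema's validate() raises ValidationError on a mismatch, so an
# uncaught exception (non-zero exit) signals an invalid manifest.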
HIPERFIT/futhark
tests_lib/c/validatemanifest.py
Python
isc
202
from django.contrib.admin.sites import site from django.contrib.auth import login as auth_login, logout as auth_logout from django.core.serializers.json import simplejson as json from django.forms import ModelForm from django.http import Http404, HttpResponse, HttpResponseRedirect from django.middleware.csrf import get_token, CsrfViewMiddleware from django.utils.text import capfirst from django.views.decorators.csrf import csrf_exempt from django_remote_forms.forms import RemoteForm from adminapi.apps.adminapi.constants import ADMIN_FORM_OVERRIDES from adminapi.apps.adminapi.forms import LoginForm from adminapi.apps.adminapi.utils import LazyEncoder @csrf_exempt def handle_login(request): csrf_middleware = CsrfViewMiddleware() response_data = {} form = None if request.raw_post_data: request.POST = json.loads(request.raw_post_data) csrf_middleware.process_view(request, None, None, None) if 'data' in request.POST: form = LoginForm(data=request.POST['data']) if form.is_valid(): if not request.POST['meta']['validate']: auth_login(request, form.get_user()) else: form = LoginForm(request) response_data['csrfmiddlewaretoken'] = get_token(request) if form is not None: remote_form = RemoteForm(form) response_data.update(remote_form.as_dict()) response = HttpResponse(json.dumps(response_data, cls=LazyEncoder), mimetype="application/json") csrf_middleware.process_response(request, response) return response def handle_logout(request): auth_logout(request) return HttpResponseRedirect('/') def get_models(request, app_label=None): # Return data on all models registered with admin if not request.user.is_authenticated(): return HttpResponse('Unauthorized request', status=401) if request.user.is_staff or request.user.is_superuser: has_module_perms = True else: has_module_perms = request.user.has_module_perms(app_label) app_list = [] for model, model_admin in site._registry.items(): model_name = model._meta.module_name if app_label is not None and app_label != model._meta.app_label: continue else: current_app_label = model._meta.app_label is_new_app = True current_app_dict = { 'label': current_app_label, 'title': capfirst(current_app_label), 'has_module_perms': has_module_perms, 'models': [] } for app_dict in app_list: if app_dict['label'] == current_app_label: current_app_dict = app_dict is_new_app = False break if has_module_perms: if request.user.is_superuser: perms = {'add': True, 'change': True, 'delete': True} else: perms = model_admin.get_model_perms(request) # Check whether user has any perm for this module. # If so, add the module to the model_list. if True in perms.values(): model_dict = { 'app_label': current_app_label, 'name': model_name, 'title': unicode(capfirst(model._meta.verbose_name_plural)), 'perms': perms, } current_app_dict['models'].append(model_dict), if 'models' in current_app_dict: # Sort the models alphabetically within each app. 
current_app_dict['models'].sort(key=lambda x: x['name']) if is_new_app: app_list.append(current_app_dict) if not app_list: raise Http404('The requested admin page does not exist') response_data = { 'app_list': app_list } return HttpResponse(json.dumps(response_data, cls=LazyEncoder), mimetype="application/json") def get_model_instances(request, app_label, model_name): if not request.user.is_authenticated(): return HttpResponse('Unauthorized', status=401) # Return list of instances for a given model response_data = { 'name': model_name, 'header': [], 'instances': [], 'admin': {} } for model, model_admin in site._registry.items(): current_app_label = '' if app_label != model._meta.app_label or model_name != model._meta.module_name: continue else: current_app_label = model._meta.app_label response_data['admin'].update({ 'list_display': model_admin.list_display, 'list_editable': model_admin.list_editable, 'ordering': model_admin.ordering }) if 'app' not in response_data: response_data['app'] = { 'label': current_app_label, 'title': capfirst(current_app_label) } if 'title' not in response_data: response_data['title'] = unicode(capfirst(model._meta.verbose_name_plural)) is_header_generated = False for model_instance in model.objects.all(): instance_data = { 'id': model_instance.pk, 'name': model_name, 'app_label': app_label, 'list_data': { 'lead': None, 'rest': [] } } if '__str__' in response_data['admin']['list_display']: instance_data['list_data']['rest'] = (model_instance.pk, unicode(model_instance),) if not is_header_generated: response_data['header'] = ('ID', 'Title',) else: for instance_property_name in response_data['admin']['list_display']: instance_property_value = getattr(model_instance, instance_property_name, '') if callable(instance_property_value): instance_property_value = instance_property_value() instance_property_value = unicode(instance_property_value) if not is_header_generated: normalized_instance_property_name = instance_property_name if '__' in instance_property_name: normalized_instance_property_name = ' '.join(instance_property_name.split('__')[1:]) normalized_instance_property_name = capfirst(normalized_instance_property_name) response_data['header'].append(normalized_instance_property_name) instance_data['list_data']['rest'].append(instance_property_value) if not is_header_generated: is_header_generated = True # Split the list of values instance_data['list_data']['lead'] = instance_data['list_data']['rest'][0] instance_data['list_data']['rest'] = instance_data['list_data']['rest'][1:] response_data['instances'].append(instance_data) return HttpResponse(json.dumps(response_data, cls=LazyEncoder), mimetype="application/json") @csrf_exempt def handle_instance_form(request, app_label, model_name, instance_id=None): if not request.user.is_authenticated(): return HttpResponse('Unauthorized', status=401) csrf_middleware = CsrfViewMiddleware() response_data = { 'meta': { 'app_label': app_label, 'model_name': model_name }, 'admin': {} } instance = None for model, model_admin in site._registry.items(): if app_label != model._meta.app_label or model_name != model._meta.module_name: continue field_configuration = { 'include': model_admin.fields or [], 'exclude': model_admin.exclude or [], 'ordering': model_admin.fields or [], 'fieldsets': model_admin.fieldsets or {}, 'readonly': model_admin.readonly_fields or [] } if instance_id is not None: response_data[instance_id] = instance_id try: instance = model.objects.get(pk=instance_id) except model.DoesNotExist: raise Http404('Invalid 
instance ID') current_model = model CurrentModelForm = ADMIN_FORM_OVERRIDES.get(model_name, None) if CurrentModelForm is None: class CurrentModelForm(ModelForm): class Meta: model = current_model if request.method == 'GET': # Return instance form for given model name # Return initial values if instance ID is supplied, otherwise return empty form if instance is None: form = CurrentModelForm() else: form = CurrentModelForm(instance=instance) for field_name, initial_value in form.initial.items(): if initial_value is not None and field_name in form.fields: form.fields[field_name].initial = initial_value response_data['csrfmiddlewaretoken'] = get_token(request) remote_form = RemoteForm(form, **field_configuration) response_data.update(remote_form.as_dict()) elif request.raw_post_data: request.POST = json.loads(request.raw_post_data) csrf_middleware.process_view(request, None, None, None) if 'data' in request.POST: if instance_id is None: form = CurrentModelForm(request.POST['data']) else: form = CurrentModelForm(request.POST['data'], instance=instance) if form.is_valid(): if not request.POST['meta']['validate']: form.save() remote_form = RemoteForm(form, **field_configuration) response_data.update(remote_form.as_dict()) response = HttpResponse(json.dumps(response_data, cls=LazyEncoder), mimetype="application/json") csrf_middleware.process_response(request, response) return response
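# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the views above are
# meant to be wired into a URLconf. A minimal mapping for the old-style Django
# used here (raw_post_data, mimetype), assuming this module is importable as
# adminapi.views, could look like the following. Only get_model_instances and
# handle_instance_form are visible in this file; the app-list view whose tail
# appears above is referenced here under the assumed name 'get_apps'.
#
#   urlpatterns = patterns('adminapi.views',
#       url(r'^apps/$', 'get_apps'),
#       url(r'^(?P<app_label>\w+)/(?P<model_name>\w+)/$', 'get_model_instances'),
#       url(r'^(?P<app_label>\w+)/(?P<model_name>\w+)/form/$', 'handle_instance_form'),
#       url(r'^(?P<app_label>\w+)/(?P<model_name>\w+)/form/(?P<instance_id>\d+)/$',
#           'handle_instance_form'),
#   )
# ---------------------------------------------------------------------------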
tarequeh/django-remote-admin
src/adminapi/apps/adminapi/views.py
Python
mit
10,166
# Copyright 2017 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import stat import unittest from src.test.py.bazel import test_base class CcImportTest(test_base.TestBase): def createProjectFiles(self, alwayslink=0, system_provided=0, linkstatic=1, provide_header=True): self.ScratchFile('WORKSPACE') # We use the outputs of cc_binary and cc_library as precompiled # libraries for cc_import self.ScratchFile( 'lib/BUILD', [ 'package(default_visibility = ["//visibility:public"])', '', 'cc_binary(', ' name = "libA.so",', ' srcs = ["a.cc"],', ' linkshared = 1,', ')', '', 'filegroup(', ' name = "libA_ifso",', ' srcs = [":libA.so"],', ' output_group = "interface_library",', ')', '', 'cc_library(', ' name = "libA",', ' srcs = ["a.cc", "a_al.cc"],', ')', '', 'filegroup(', ' name = "libA_archive",', ' srcs = [":libA"],', ' output_group = "archive",', ')', '', 'cc_import(', ' name = "A",', ' static_library = "//lib:libA_archive",', ' shared_library = "//lib:libA.so",' if not system_provided else '', # On Windows, we always need the interface library ' interface_library = "//lib:libA_ifso",' if self.IsWindows() else ( # On Unix, we use .so file as interface library # if system_provided is true ' interface_library = "//lib:libA.so",' if system_provided else ''), ' hdrs = ["a.h"],' if provide_header else '', ' alwayslink = %s,' % str(alwayslink), ' system_provided = %s,' % str(system_provided), ')', ]) self.ScratchFile('lib/a.cc', [ '#include <stdio.h>', '', '#ifdef _WIN32', ' #define DLLEXPORT __declspec(dllexport)', '#else', ' #define DLLEXPORT', '#endif', '', 'DLLEXPORT void HelloWorld() {', ' printf("HelloWorld\\n");', '}', ]) # For testing alwayslink=1 self.ScratchFile('lib/a_al.cc', [ 'extern int global_variable;', 'int init() {', ' ++global_variable;', ' return global_variable;', '}', 'int x = init();', 'int y = init();', ]) self.ScratchFile('lib/a.h', [ 'void HelloWorld();', ]) self.ScratchFile('main/BUILD', [ 'cc_binary(', ' name = "B",', ' srcs = ["b.cc"],', ' deps = ["//lib:A",],', ' linkstatic = %s,' % str(linkstatic), ')', ]) self.ScratchFile('main/b.cc', [ '#include <stdio.h>', '#include "lib/a.h"', 'int global_variable = 0;', 'int main() {', ' HelloWorld();', ' printf("global : %d\\n", global_variable);', ' return 0;', '}', ]) def getBazelInfo(self, info_key): exit_code, stdout, stderr = self.RunBazel(['info', info_key]) self.AssertExitCode(exit_code, 0, stderr) return stdout[0] def testLinkStaticLibrary(self): self.createProjectFiles(alwayslink=0, linkstatic=1) bazel_bin = self.getBazelInfo('bazel-bin') suffix = '.exe' if self.IsWindows() else '' exit_code, _, stderr = self.RunBazel(['build', '//main:B']) self.AssertExitCode(exit_code, 0, stderr) b_bin = os.path.join(bazel_bin, 'main/B' + suffix) self.assertTrue(os.path.exists(b_bin)) exit_code, stdout, stderr = self.RunProgram([b_bin]) self.AssertExitCode(exit_code, 0, stderr) self.assertEqual(stdout[0], 'HelloWorld') self.assertEqual(stdout[1], 'global : 0') def 
testAlwayslinkStaticLibrary(self):
    self.createProjectFiles(alwayslink=1, linkstatic=1)
    bazel_bin = self.getBazelInfo('bazel-bin')
    suffix = '.exe' if self.IsWindows() else ''
    exit_code, _, stderr = self.RunBazel(['build', '//main:B'])
    self.AssertExitCode(exit_code, 0, stderr)
    b_bin = os.path.join(bazel_bin, 'main/B' + suffix)
    self.assertTrue(os.path.exists(b_bin))
    exit_code, stdout, stderr = self.RunProgram([b_bin])
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertEqual(stdout[0], 'HelloWorld')
    self.assertEqual(stdout[1], 'global : 2')

  def testLinkSharedLibrary(self):
    self.createProjectFiles(linkstatic=0)
    bazel_bin = self.getBazelInfo('bazel-bin')
    suffix = '.exe' if self.IsWindows() else ''
    exit_code, _, stderr = self.RunBazel(['build', '//main:B'])
    self.AssertExitCode(exit_code, 0, stderr)
    b_bin = os.path.join(bazel_bin, 'main/B' + suffix)
    self.assertTrue(os.path.exists(b_bin))
    if self.IsWindows():
      self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/libA.so')))
    exit_code, stdout, stderr = self.RunProgram([b_bin])
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertEqual(stdout[0], 'HelloWorld')

  def testSystemProvidedSharedLibraryOnWindows(self):
    if not self.IsWindows():
      return
    self.createProjectFiles(system_provided=1, linkstatic=0)
    bazel_bin = self.getBazelInfo('bazel-bin')
    exit_code, _, stderr = self.RunBazel(['build', '//main:B'])
    self.AssertExitCode(exit_code, 0, stderr)
    b_bin = os.path.join(bazel_bin, 'main/B.exe')
    exit_code, stdout, stderr = self.RunProgram([b_bin])
    # Should fail because libA.so is missing
    self.assertFalse(exit_code == 0)
    # Build libA.so and add it to PATH
    exit_code, stdout, stderr = self.RunBazel(['build', '//lib:libA.so'])
    self.AssertExitCode(exit_code, 0, stderr)
    exit_code, stdout, stderr = self.RunProgram(
        [b_bin], env_add={'PATH': str(os.path.join(bazel_bin, 'lib'))})
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertEqual(stdout[0], 'HelloWorld')

  def testSystemProvidedSharedLibraryOnUnix(self):
    if not self.IsUnix():
      return
    self.createProjectFiles(system_provided=1, linkstatic=0)
    bazel_bin = self.getBazelInfo('bazel-bin')
    exit_code, _, stderr = self.RunBazel(['build', '//main:B'])
    self.AssertExitCode(exit_code, 0, stderr)
    b_bin = os.path.join(bazel_bin, 'main/B')
    tmp_dir = self.ScratchDir('temp_dir_for_run_b_bin')
    b_bin_tmp = os.path.join(tmp_dir, 'B')
    # Copy the binary to a temp directory to make sure it cannot find
    # libA.so
    shutil.copyfile(b_bin, b_bin_tmp)
    os.chmod(b_bin_tmp, stat.S_IRWXU)
    exit_code, stdout, stderr = self.RunProgram([b_bin_tmp])
    # Should fail because libA.so is missing
    self.assertFalse(exit_code == 0)
    # Build libA.so and add it to the library search path
    exit_code, stdout, stderr = self.RunBazel(['build', '//lib:libA.so'])
    self.AssertExitCode(exit_code, 0, stderr)
    exit_code, stdout, stderr = self.RunProgram(
        [b_bin_tmp],
        env_add={
            # For Linux
            'LD_LIBRARY_PATH': str(os.path.join(bazel_bin, 'lib')),
            # For Mac
            'DYLD_LIBRARY_PATH': str(os.path.join(bazel_bin, 'lib')),
        })
    self.AssertExitCode(exit_code, 0, stderr)
    self.assertEqual(stdout[0], 'HelloWorld')

  def testCcImportHeaderCheck(self):
    self.createProjectFiles(provide_header=False)
    # The build should fail because lib/a.h is not declared in the BUILD file.
    # Disable sandboxing so that bazel produces the same error across platforms.
exit_code, _, stderr = self.RunBazel( ['build', '//main:B', '--spawn_strategy=standalone']) self.AssertExitCode(exit_code, 1, stderr) self.assertIn('this rule is missing dependency declarations for the' ' following files included by \'main/b.cc\':', ''.join(stderr)) def AssertFileContentContains(self, file_path, entry): with open(file_path, 'r') as f: if entry not in f.read(): self.fail('File "%s" does not contain "%s"' % (file_path, entry)) if __name__ == '__main__': unittest.main()
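# ---------------------------------------------------------------------------
# For quick reference, this is the cc_import target exercised by the tests
# above, distilled from createProjectFiles() with its default arguments
# (alwayslink=0, system_provided=0, header provided; the Windows-only
# interface_library line is omitted here):
#
#   cc_import(
#       name = "A",
#       static_library = "//lib:libA_archive",
#       shared_library = "//lib:libA.so",
#       hdrs = ["a.h"],
#       alwayslink = 0,
#       system_provided = 0,
#   )
# ---------------------------------------------------------------------------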
dropbox/bazel
src/test/py/bazel/cc_import_test.py
Python
apache-2.0
8,839
#!/usr/bin/env python """ @package mi.dataset.parser.test.test_dosta_abcdjm_sio @file mi/dataset/parser/test/test_dosta_abcdjm_sio.py @author Emily Hahn @brief An dosta series a,b,c,d,j,m through sio specific dataset agent parser """ __author__ = 'Emily Hahn' __license__ = 'Apache 2.0' import os from nose.plugins.attrib import attr from mi.core.log import get_logger log = get_logger() from mi.dataset.test.test_parser import ParserUnitTestCase, BASE_RESOURCE_PATH from mi.dataset.parser.dosta_abcdjm_sio import \ DostaAbcdjmSioRecoveredDataParticle, \ DostaAbcdjmSioTelemeteredDataParticle, \ DostaAbcdjmSioRecoveredMetadataDataParticle, \ DostaAbcdjmSioTelemeteredMetadataDataParticle, \ DostaAbcdjmSioParser, \ METADATA_PARTICLE_CLASS_KEY, \ DATA_PARTICLE_CLASS_KEY from mi.dataset.dataset_parser import DataSetDriverConfigKeys RESOURCE_PATH = os.path.join(BASE_RESOURCE_PATH, 'dosta_abcdjm', 'sio', 'resource') @attr('UNIT', group='mi') class DostaAbcdjmSioParserUnitTestCase(ParserUnitTestCase): def setUp(self): ParserUnitTestCase.setUp(self) self.config_telem = { DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dosta_abcdjm_sio', DataSetDriverConfigKeys.PARTICLE_CLASS: None, DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: { METADATA_PARTICLE_CLASS_KEY: DostaAbcdjmSioTelemeteredMetadataDataParticle, DATA_PARTICLE_CLASS_KEY: DostaAbcdjmSioTelemeteredDataParticle } } self.config_recov = { DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dosta_abcdjm_sio', DataSetDriverConfigKeys.PARTICLE_CLASS: None, DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: { METADATA_PARTICLE_CLASS_KEY: DostaAbcdjmSioRecoveredMetadataDataParticle, DATA_PARTICLE_CLASS_KEY: DostaAbcdjmSioRecoveredDataParticle } } def test_simple(self): """ Read test data from the file and pull out telemetered data particles. Assert that the results are those we expected. 
""" with open(os.path.join(RESOURCE_PATH, 'node59p1_1.dosta.dat')) as stream_handle: parser = DostaAbcdjmSioParser(self.config_telem, stream_handle, self.exception_callback) particles = parser.get_records(7) self.assert_particles(particles, "dosta_telem_1.yml", RESOURCE_PATH) self.assertEqual(self.exception_callback_value, []) def test_simple_recovered(self): """ Read test data and pull out recovered data particles """ with open(os.path.join(RESOURCE_PATH, 'DOS15908_1st7.DAT')) as stream_handle: parser = DostaAbcdjmSioParser(self.config_recov, stream_handle, self.exception_callback) # get the first 4 particles particles = parser.get_records(4) self.assert_particles(particles, "dosta_recov_1.yml", RESOURCE_PATH) # get the next 4 particles (confirming we can break up getting records) particles = parser.get_records(4) self.assert_particles(particles, "dosta_recov_2.yml", RESOURCE_PATH) # confirm no exceptions occurred self.assertEqual(self.exception_callback_value, []) def test_long_stream(self): """ Read test data and pull out recovered data particles """ with open(os.path.join(RESOURCE_PATH, 'node59p1_0.dosta.dat')) as stream_handle: parser = DostaAbcdjmSioParser(self.config_telem, stream_handle, self.exception_callback) # request more particles than available particles = parser.get_records(40) # confirm we only get requested number self.assertEqual(len(particles), 37) # confirm no exceptions occurred self.assertEqual(self.exception_callback_value, []) def test_long_stream_recovered(self): """ Read test data and pull out recovered data particles """ with open(os.path.join(RESOURCE_PATH, 'DOS15908.DAT')) as stream_handle: parser = DostaAbcdjmSioParser(self.config_recov, stream_handle, self.exception_callback) particles = parser.get_records(100) # confirm we get requested number self.assertEqual(len(particles), 97) self.assertEqual(self.exception_callback_value, []) def test_drain(self): """ This test ensures that we stop parsing chunks when we have completed parsing all the records in the input file. """ with open(os.path.join(RESOURCE_PATH, 'node59p1_0.dosta.dat')) as stream_handle: parser = DostaAbcdjmSioParser(self.config_telem, stream_handle, self.exception_callback) # request more particles than available particles = parser.get_records(40) # confirm we only get requested number self.assertEqual(len(particles), 37) # request more particles than available particles = parser.get_records(40) # confirm we only get requested number self.assertEqual(len(particles), 0) # confirm no exceptions occurred self.assertEqual(self.exception_callback_value, []) def test_10603(self): """ Read test data from the file and pull out telemetered data particles. Assert that the results are those we expected. """ with open(os.path.join(RESOURCE_PATH, 'node25p1_46.dosta_1237201.dat')) as stream_handle: parser = DostaAbcdjmSioParser(self.config_telem, stream_handle, self.exception_callback) particles = parser.get_records(700) self.assertEqual(len(particles), 45) self.assertEqual(self.exception_callback_value, [])
oceanobservatories/mi-dataset
mi/dataset/parser/test/test_dosta_abcdjm_sio.py
Python
bsd-2-clause
5,899
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

import json
import os
import time
import shutil
import requests
import traceback
import re
import six

from .utils import utils

from requests.auth import HTTPBasicAuth
from xmltodict import parse as parse_xml
from urllib.parse import urlparse, urljoin

from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError

from geonode.utils import (DisableDjangoSignals,
                           get_dir_time_suffix,
                           zip_dir,
                           copy_tree)
from geonode.base.models import Configuration


class Command(BaseCommand):

    help = 'Backup the GeoNode application data'

    def add_arguments(self, parser):
        # Named (optional) arguments
        utils.option(parser)
        utils.geoserver_option_list(parser)
        parser.add_argument(
            '-i',
            '--ignore-errors',
            action='store_true',
            dest='ignore_errors',
            default=False,
            help='Continue the execution even if errors are encountered.')
        parser.add_argument(
            '-f',
            '--force',
            action='store_true',
            dest='force_exec',
            default=False,
            help='Forces the execution without asking for confirmation.')
        parser.add_argument(
            '--skip-geoserver',
            action='store_true',
            default=False,
            help='Skips geoserver backup')
        parser.add_argument(
            '--backup-dir',
            dest='backup_dir',
            help='Destination folder where to store the backup archive. It must be writable.')
        parser.add_argument(
            '--skip-read-only',
            action='store_true',
            dest='skip_read_only',
            default=False,
            help='Skips activation of the Read Only mode in backup procedure execution.')

    def handle(self, **options):
        skip_read_only = options.get('skip_read_only')
        config = Configuration.load()

        # activate read-only mode and store its original config value
        if not skip_read_only:
            original_read_only_value = config.read_only
            config.read_only = True
            config.save()

        try:
            # execute backup procedure
            self.execute_backup(**options)
        except Exception:
            raise
        finally:
            # restore the original value of read-only mode
            if not skip_read_only:
                config.read_only = original_read_only_value
                config.save()

    def execute_backup(self, **options):
        ignore_errors = options.get('ignore_errors')
        config = utils.Config(options)
        force_exec = options.get('force_exec')
        backup_dir = options.get('backup_dir')
        skip_geoserver = options.get('skip_geoserver')

        if not backup_dir or len(backup_dir) == 0:
            raise CommandError("Destination folder '--backup-dir' is mandatory")

        print("Before proceeding with the Backup, please ensure that:")
        print(" 1. The backend (DB or whatever) is accessible and you have rights")
        print(" 2. The GeoServer is up and running and reachable from this machine")
        message = 'Do you want to proceed?'
if force_exec or utils.confirm(prompt=message, resp=False): # Create Target Folder dir_time_suffix = get_dir_time_suffix() target_folder = os.path.join(backup_dir, dir_time_suffix) if not os.path.exists(target_folder): os.makedirs(target_folder) # Temporary folder to store backup files. It will be deleted at the end. os.chmod(target_folder, 0o777) if not skip_geoserver: self.create_geoserver_backup(config, settings, target_folder, ignore_errors) self.dump_geoserver_raster_data(config, settings, target_folder) self.dump_geoserver_vector_data(config, settings, target_folder) print("Dumping geoserver external resources") self.dump_geoserver_externals(config, settings, target_folder) else: print("Skipping geoserver backup") # Deactivate GeoNode Signals with DisableDjangoSignals(): # Dump Fixtures for app_name, dump_name in zip(config.app_names, config.dump_names): # prevent dumping BackupRestore application if app_name == 'br': continue print("Dumping '"+app_name+"' into '"+dump_name+".json'.") # Point stdout at a file for dumping data to. output = open(os.path.join(target_folder, dump_name+'.json'), 'w') call_command('dumpdata', app_name, format='json', indent=2, stdout=output) output.close() # Store Media Root media_root = settings.MEDIA_ROOT media_folder = os.path.join(target_folder, utils.MEDIA_ROOT) if not os.path.exists(media_folder): os.makedirs(media_folder) copy_tree(media_root, media_folder, ignore=utils.ignore_time(config.gs_data_dt_filter[0], config.gs_data_dt_filter[1])) print("Saved Media Files from '"+media_root+"'.") # Store Static Root static_root = settings.STATIC_ROOT static_folder = os.path.join(target_folder, utils.STATIC_ROOT) if not os.path.exists(static_folder): os.makedirs(static_folder) copy_tree(static_root, static_folder, ignore=utils.ignore_time(config.gs_data_dt_filter[0], config.gs_data_dt_filter[1])) print("Saved Static Root from '"+static_root+"'.") # Store Static Folders static_folders = settings.STATICFILES_DIRS static_files_folders = os.path.join(target_folder, utils.STATICFILES_DIRS) if not os.path.exists(static_files_folders): os.makedirs(static_files_folders) for static_files_folder in static_folders: # skip dumping of static files of apps not located under LOCAL_ROOT path # (check to prevent saving files from site-packages in project-template based GeoNode projects) if getattr(settings, 'LOCAL_ROOT', None) and not static_files_folder.startswith(settings.LOCAL_ROOT): print(f"Skipping static directory: {static_files_folder}. 
It's not located under LOCAL_ROOT path: {settings.LOCAL_ROOT}.") continue static_folder = os.path.join(static_files_folders, os.path.basename(os.path.normpath(static_files_folder))) if not os.path.exists(static_folder): os.makedirs(static_folder) copy_tree(static_files_folder, static_folder, ignore=utils.ignore_time(config.gs_data_dt_filter[0], config.gs_data_dt_filter[1])) print("Saved Static Files from '"+static_files_folder+"'.") # Store Template Folders template_folders = [] try: template_folders = settings.TEMPLATE_DIRS except Exception: try: template_folders = settings.TEMPLATES[0]['DIRS'] except Exception: pass template_files_folders = os.path.join(target_folder, utils.TEMPLATE_DIRS) if not os.path.exists(template_files_folders): os.makedirs(template_files_folders) for template_files_folder in template_folders: # skip dumping of template files of apps not located under LOCAL_ROOT path # (check to prevent saving files from site-packages in project-template based GeoNode projects) if getattr(settings, 'LOCAL_ROOT', None) and not template_files_folder.startswith(settings.LOCAL_ROOT): print(f"Skipping template directory: {template_files_folder}. It's not located under LOCAL_ROOT path: {settings.LOCAL_ROOT}.") continue template_folder = os.path.join(template_files_folders, os.path.basename(os.path.normpath(template_files_folder))) if not os.path.exists(template_folder): os.makedirs(template_folder) copy_tree(template_files_folder, template_folder, ignore=utils.ignore_time(config.gs_data_dt_filter[0], config.gs_data_dt_filter[1])) print("Saved Template Files from '"+template_files_folder+"'.") # Store Locale Folders locale_folders = settings.LOCALE_PATHS locale_files_folders = os.path.join(target_folder, utils.LOCALE_PATHS) if not os.path.exists(locale_files_folders): os.makedirs(locale_files_folders) for locale_files_folder in locale_folders: # skip dumping of locale files of apps not located under LOCAL_ROOT path # (check to prevent saving files from site-packages in project-template based GeoNode projects) if getattr(settings, 'LOCAL_ROOT', None) and not locale_files_folder.startswith(settings.LOCAL_ROOT): print(f"Skipping locale directory: {locale_files_folder}. It's not located under LOCAL_ROOT path: {settings.LOCAL_ROOT}.") continue locale_folder = os.path.join(locale_files_folders, os.path.basename(os.path.normpath(locale_files_folder))) if not os.path.exists(locale_folder): os.makedirs(locale_folder) copy_tree(locale_files_folder, locale_folder, ignore=utils.ignore_time(config.gs_data_dt_filter[0], config.gs_data_dt_filter[1])) print("Saved Locale Files from '"+locale_files_folder+"'.") # Create Final ZIP Archive backup_archive = os.path.join(backup_dir, dir_time_suffix+'.zip') zip_dir(target_folder, backup_archive) # Generate a md5 hash of a backup archive and save it backup_md5_file = os.path.join(backup_dir, dir_time_suffix+'.md5') zip_archive_md5 = utils.md5_file_hash(backup_archive) with open(backup_md5_file, 'w') as md5_file: md5_file.write(zip_archive_md5) # Generate the ini file with the current settings used by the backup command backup_ini_file = os.path.join(backup_dir, dir_time_suffix + '.ini') with open(backup_ini_file, 'w') as configfile: config.config_parser.write(configfile) # Clean-up Temp Folder try: shutil.rmtree(target_folder) except Exception: print("WARNING: Could not be possible to delete the temp folder: '" + str(target_folder) + "'") print("Backup Finished. 
Archive generated.") return str(os.path.join(backup_dir, dir_time_suffix+'.zip')) def create_geoserver_backup(self, config, settings, target_folder, ignore_errors): # Create GeoServer Backup url = settings.OGC_SERVER['default']['LOCATION'] user = settings.OGC_SERVER['default']['USER'] passwd = settings.OGC_SERVER['default']['PASSWORD'] geoserver_bk_file = os.path.join(target_folder, 'geoserver_catalog.zip') print("Dumping 'GeoServer Catalog ["+url+"]' into '"+geoserver_bk_file+"'.") r = requests.put(url + 'rest/reset/', auth=HTTPBasicAuth(user, passwd)) if r.status_code != 200: raise ValueError('Could not reset GeoServer catalog!') r = requests.put(url + 'rest/reload/', auth=HTTPBasicAuth(user, passwd)) if r.status_code != 200: raise ValueError('Could not reload GeoServer catalog!') error_backup = 'Could not successfully backup GeoServer ' + \ 'catalog [{}rest/br/backup/]: {} - {}' _options = [ 'BK_CLEANUP_TEMP=true', 'BK_SKIP_SETTINGS=false', 'BK_SKIP_SECURITY=false', 'BK_BEST_EFFORT={}'.format('true' if ignore_errors else 'false'), 'exclude.file.path={}'.format(config.gs_exclude_file_path) ] data = {'backup': {'archiveFile': geoserver_bk_file, 'overwrite': 'true', 'options': {'option': _options}}} headers = { 'Accept': 'application/json', 'Content-type': 'application/json' } r = requests.post(url + 'rest/br/backup/', data=json.dumps(data), headers=headers, auth=HTTPBasicAuth(user, passwd)) if r.status_code in (200, 201, 406): try: r = requests.get(url + 'rest/br/backup.json', headers=headers, auth=HTTPBasicAuth(user, passwd), timeout=10) if (r.status_code == 200): gs_backup = r.json() _url = urlparse(gs_backup['backups']['backup'][len(gs_backup['backups']['backup']) - 1]['href']) _url = '{}?{}'.format(urljoin(url, _url.path), _url.query) r = requests.get(_url, headers=headers, auth=HTTPBasicAuth(user, passwd), timeout=10) if (r.status_code == 200): gs_backup = r.json() if (r.status_code != 200): raise ValueError(error_backup.format(url, r.status_code, r.text)) except ValueError: raise ValueError(error_backup.format(url, r.status_code, r.text)) gs_bk_exec_id = gs_backup['backup']['execution']['id'] r = requests.get(url + 'rest/br/backup/' + str(gs_bk_exec_id) + '.json', headers=headers, auth=HTTPBasicAuth(user, passwd), timeout=10) if (r.status_code == 200): gs_bk_exec_status = gs_backup['backup']['execution']['status'] gs_bk_exec_progress = gs_backup['backup']['execution']['progress'] gs_bk_exec_progress_updated = '0/0' while (gs_bk_exec_status != 'COMPLETED' and gs_bk_exec_status != 'FAILED'): if (gs_bk_exec_progress != gs_bk_exec_progress_updated): gs_bk_exec_progress_updated = gs_bk_exec_progress r = requests.get(url + 'rest/br/backup/' + str(gs_bk_exec_id) + '.json', headers=headers, auth=HTTPBasicAuth(user, passwd), timeout=10) if (r.status_code == 200): try: gs_backup = r.json() except ValueError: raise ValueError(error_backup.format(url, r.status_code, r.text)) gs_bk_exec_status = gs_backup['backup']['execution']['status'] gs_bk_exec_progress = gs_backup['backup']['execution']['progress'] print(str(gs_bk_exec_status) + ' - ' + gs_bk_exec_progress) time.sleep(3) else: raise ValueError(error_backup.format(url, r.status_code, r.text)) if gs_bk_exec_status == 'FAILED': raise ValueError(error_backup.format(url, r.status_code, r.text)) _permissions = 0o777 os.chmod(geoserver_bk_file, _permissions) status = os.stat(geoserver_bk_file) if oct(status.st_mode & 0o777) != str(oct(_permissions)): raise Exception("Could not update permissions of {}".format(geoserver_bk_file)) else: raise 
ValueError(error_backup.format(url, r.status_code, r.text)) def dump_geoserver_raster_data(self, config, settings, target_folder): if (config.gs_data_dir): if (config.gs_dump_raster_data): # Dump '$config.gs_data_dir/geonode' gs_data_root = os.path.join(config.gs_data_dir, 'geonode') if not os.path.isabs(gs_data_root): gs_data_root = os.path.join(settings.PROJECT_ROOT, '..', gs_data_root) print("Dumping GeoServer Uploaded Data from '"+gs_data_root+"'.") if os.path.exists(gs_data_root): gs_data_folder = os.path.join(target_folder, 'gs_data_dir', 'geonode') if not os.path.exists(gs_data_folder): os.makedirs(gs_data_folder) copy_tree(gs_data_root, gs_data_folder, ignore=utils.ignore_time(config.gs_data_dt_filter[0], config.gs_data_dt_filter[1])) print("Dumped GeoServer Uploaded Data from '"+gs_data_root+"'.") else: print("Skipped GeoServer Uploaded Data '"+gs_data_root+"'.") # Dump '$config.gs_data_dir/data/geonode' gs_data_root = os.path.join(config.gs_data_dir, 'data', 'geonode') if not os.path.isabs(gs_data_root): gs_data_root = os.path.join(settings.PROJECT_ROOT, '..', gs_data_root) print("Dumping GeoServer Uploaded Data from '"+gs_data_root+"'.") if os.path.exists(gs_data_root): gs_data_folder = os.path.join(target_folder, 'gs_data_dir', 'data', 'geonode') if not os.path.exists(gs_data_folder): os.makedirs(gs_data_folder) copy_tree(gs_data_root, gs_data_folder, ignore=utils.ignore_time(config.gs_data_dt_filter[0], config.gs_data_dt_filter[1])) print("Dumped GeoServer Uploaded Data from '" + gs_data_root + "'.") else: print("Skipped GeoServer Uploaded Data '"+gs_data_root+"'.") def dump_geoserver_vector_data(self, config, settings, target_folder): if (config.gs_dump_vector_data): # Dump Vectorial Data from DB datastore = settings.OGC_SERVER['default']['DATASTORE'] if (datastore): ogc_db_name = settings.DATABASES[datastore]['NAME'] ogc_db_user = settings.DATABASES[datastore]['USER'] ogc_db_passwd = settings.DATABASES[datastore]['PASSWORD'] ogc_db_host = settings.DATABASES[datastore]['HOST'] ogc_db_port = settings.DATABASES[datastore]['PORT'] gs_data_folder = os.path.join(target_folder, 'gs_data_dir', 'geonode') if not os.path.exists(gs_data_folder): os.makedirs(gs_data_folder) utils.dump_db(config, ogc_db_name, ogc_db_user, ogc_db_port, ogc_db_host, ogc_db_passwd, gs_data_folder) def dump_geoserver_externals(self, config, settings, target_folder): """Scan layers xml and see if there are external references. Find references to data outside data dir and include them in backup. Also, some references may point to specific url, which may not be available later. 
""" external_folder = os.path.join(target_folder, utils.EXTERNAL_ROOT) def copy_external_resource(abspath): external_path = os.path.join(external_folder, abspath[1:]) external_dir = os.path.dirname(external_path) if not os.path.isdir(external_dir): os.makedirs(external_dir) try: if not os.path.isdir(external_path) and os.path.exists(external_path): shutil.copy2(abspath, external_path) except shutil.SameFileError: print("WARNING: {} and {} are the same file!".format(abspath, external_path)) def match_filename(key, text, regexp=re.compile("^(.+)$")): if key in ('filename', ): match = regexp.match(text.decode("utf-8")) if match: relpath = match.group(1) try: abspath = relpath if os.path.isabs(relpath) else \ os.path.abspath( os.path.join(os.path.dirname(path), relpath)) if os.path.exists(abspath): return abspath except Exception: print("WARNING: Error while trying to dump {}".format(text)) return def match_fileurl(key, text, regexp=re.compile("^file:(.+)$")): if key in ('url', ): match = regexp.match(text.decode("utf-8")) if match: relpath = match.group(1) try: abspath = relpath if os.path.isabs(relpath) else \ os.path.abspath( os.path.join(config.gs_data_dir, relpath)) if os.path.exists(abspath): return abspath except Exception: print("WARNING: Error while trying to dump {}".format(text)) return def dump_external_resources_from_xml(path): def find_external(tree, key=None): if isinstance(tree, dict): for key, value in tree.items(): for found in find_external(value, key=key): yield found elif isinstance(tree, list): for item in tree: for found in find_external(item, key=key): yield found elif isinstance(tree, six.string_types): text = tree.encode('utf-8') for find in (match_fileurl, match_filename): found = find(key, text) if found: yield found with open(path, 'rb') as fd: content = fd.read() tree = parse_xml(content) for found in find_external(tree): if found.find(config.gs_data_dir) != 0: copy_external_resource(found) def is_xml_file(filename, regexp=re.compile(".*.xml$")): return regexp.match(filename) is not None for directory in ('workspaces', 'styles'): source = os.path.join(config.gs_data_dir, directory) for root, dirs, files in os.walk(source): for filename in filter(is_xml_file, files): path = os.path.join(root, filename) dump_external_resources_from_xml(path)
tomkralidis/geonode
geonode/br/management/commands/backup.py
Python
gpl-3.0
24,680
# -*- coding: utf-8 -*-
"""
/***************************************************************************
 Blurring
                                 A QGIS plugin
 Blurring data
                              -------------------
        begin                : 2014-03-11
        copyright            : (C) 2014 by TER Géomatique UM2
        email                : ter-floutage@googlegroups.com
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 ***************************************************************************/
"""
from Blurring import *


class LayerIndex:
    """Check whether a QgsGeometry intersects a QgsVectorLayer, using a spatial index."""

    def __init__(self, layer):
        self.__layer = layer
        self.__index = QgsSpatialIndex()
        for ft in layer.getFeatures():
            self.__index.insertFeature(ft)

    def contains(self, point):
        """Return True if the point intersects the layer."""
        intersects = self.__index.intersects(point.boundingBox())
        for i in intersects:
            request = QgsFeatureRequest().setFilterFid(i)
            feat = self.__layer.getFeatures(request).next()
            tmpGeom = QgsGeometry(feat.geometry())
            if point.intersects(tmpGeom):
                return True
        return False

    def countIntersection(self, bufferGeom, nb):
        """Return True if the buffer intersects at least nb features."""
        count = 0
        intersects = self.__index.intersects(bufferGeom.boundingBox())
        for i in intersects:
            request = QgsFeatureRequest().setFilterFid(i)
            feat = self.__layer.getFeatures(request).next()
            tmpGeom = QgsGeometry(feat.geometry())
            if bufferGeom.intersects(tmpGeom):
                count += 1
                if count >= nb:
                    return True
        return False
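# ---------------------------------------------------------------------------
# Hedged usage sketch (assumes a QGIS 2.x session where 'layer' is a loaded
# QgsVectorLayer; coordinates and thresholds are illustrative):
#
#   index = LayerIndex(layer)
#   point = QgsGeometry.fromPoint(QgsPoint(3.87, 43.61))
#   if index.contains(point):
#       # the point touches at least one feature; check the buffer density
#       buffer_geom = point.buffer(500, 8)
#       enough_neighbours = index.countIntersection(buffer_geom, 10)
# ---------------------------------------------------------------------------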
Gustry/Blurring
CoreBlurring/LayerIndex.py
Python
gpl-3.0
2,440
"""MNE sample dataset """ from .sample import data_path, has_sample_data, requires_sample_data
jaeilepp/eggie
mne/datasets/sample/__init__.py
Python
bsd-2-clause
96
import glob import unittest test_files = glob.glob('test_*.py') modules = [ s[:-3] for s in test_files ] suites = [unittest.defaultTestLoader.loadTestsFromName(s) for s in modules] testSuite = unittest.TestSuite(suites) text_runner = unittest.TextTestRunner().run(testSuite)
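# An equivalent run with the stdlib discovery API (Python 2.7+), kept here for
# reference; it should load the same test_*.py modules as the glob above:
# suite = unittest.defaultTestLoader.discover('.', pattern='test_*.py')
# unittest.TextTestRunner().run(suite)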
doctormo/gtkme
tests/test_all.py
Python
gpl-3.0
276
#!/usr/bin/env python from molmod import * # 0) Load the molecule. mol = Molecule.from_file("ibuprofen.sdf") # 1) Print the largest element in the distance matrix. print("Largest interatomic distance [A]:") print(mol.distance_matrix.max()/angstrom) # Some comments: # - One can just write code that assumes the attribute distance_matrix is # present, but in fact it is only computed once the first time it is used. # - The method max is a method of the numpy array. It returns the largest value # in an array.
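# As a cross-check of the value printed above, the same number can be computed
# directly from the Cartesian coordinates with numpy. This assumes, as
# elsewhere in molmod, that mol.coordinates holds the positions in atomic
# units, so the result is converted with /angstrom just like before.
import numpy as np
deltas = mol.coordinates[:, None, :] - mol.coordinates[None, :, :]
print(np.sqrt((deltas**2).sum(axis=-1)).max()/angstrom)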
molmod/molmod
molmod/examples/001_molecules/d_size.py
Python
gpl-3.0
521
# coding: utf-8
#
# CLUES Python utils - Utils and General classes that spin off from CLUES
# Copyright (C) 2015 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import glob
import logging
import os

_ETC_PATHS = ['/etc/']
_MAIN_CONFIG_FILE = "app.cfg"
_CONFIG_VAR_INCLUDE = ""
_CONFIG_FILTER = "*.cfg"

def set_paths(etc_paths=["/etc/"]):
    """ Sets the paths in which the configuration files will be searched
        * You can have multiple configuration files (e.g. in the /etc/default folder and in /etc/appfolder/)
    """
    global _ETC_PATHS
    _ETC_PATHS = []
    for p in etc_paths:
        _ETC_PATHS.append(os.path.expanduser(p))

def set_main_config_file(c_file):
    """ Sets the name of the main configuration file. This file will be searched in the
        configuration folders (see function "set_paths")
    """
    global _MAIN_CONFIG_FILE
    _MAIN_CONFIG_FILE = c_file

def set_config_filter(config_var_include="CONFIG_DIR", filter_="*.cfg"):
    """ Sets the filename filter for additional files to include as configuration files.
        You can also set the subfolder that should contain these files (it should be a
        subfolder of one of the folders set with "set_paths")
    """
    global _CONFIG_VAR_INCLUDE, _CONFIG_FILTER
    _CONFIG_VAR_INCLUDE = config_var_include
    _CONFIG_FILTER = filter_

_LOGGER = logging.getLogger("[CONFIG]")

def config_filename(filename):
    """ Obtains the first filename found that is included in one of the configuration
        folders, and returns the full path for the file.
        * It is useful for files that are not config-formatted (e.g. hosts files, json,
          etc.) and will be read using other mechanisms
    """
    global _ETC_PATHS
    if filename.startswith('/'):
        _LOGGER.info("using absolute path for filename \"%s\"" % filename)
        return filename
    import os.path
    for fpath in _ETC_PATHS:
        current_path = "%s/%s" % (fpath, filename)
        if os.path.isfile(current_path):
            current_path = os.path.realpath(current_path)
            _LOGGER.info("using path \"%s\" for filename \"%s\"" % (current_path, filename))
            return current_path
    _LOGGER.info("using path \"%s\" for filename \"%s\"" % (filename, filename))
    return filename

def read_config(section, variables, sink, filename=None, logvars=False):
    """ This function creates a dictionary whose keys are the variables indicated in
        'variables', with the values obtained from the config filenames set for this
        module.

        'variables' is a dictionary of variable names and default values, e.g.
        { 'VAR1': defaultval1, ...}

        The value for each variable is searched under the 'section' section of the
        configuration files (all of them: the main one and those included by the
        config_filter mechanism).

        'sink' is an object on whose __dict__ each of the variables will be set as an
        attribute (the variable names are upper-cased).
""" global _ETC_PATHS try: import ConfigParser config = ConfigParser.ConfigParser() except: import configparser config = configparser.ConfigParser() if filename is None: config_files = existing_config_files() else: config_files = [] for fpath in _ETC_PATHS: config_files.append("%s/%s" % (fpath, filename)) config.read(config_files) options = {} if section in config.sections(): options = config.options(section) for varname, value in list(variables.items()): orig_varname = varname varname = varname.lower() if varname in options: try: if isinstance(value, bool): value = config.getboolean(section, varname) elif isinstance(value, int): value = config.getint(section, varname) elif isinstance(value, float): value = config.getfloat(section, varname) else: value = config.get(section, varname) if len(value) > 0: value = value.split("#")[0].strip() except: raise Exception("Invalid value for variable %s in config file" % orig_varname) varname = varname.upper() sink.__dict__[varname] = value if (logvars): _LOGGER.debug("%s=%s" % (varname, str(value))) class Configuration(): """ Class that reads the configuration for a set of variables, from one configuration file (or all the configuration files established using the other methods in this module). e.g. my_config = Configuration( 'GENERAL', { 'VAR1': 0, 'VAR2': 'default2' } ) """ def __init__(self, section, variables, filename = None, callback = None): read_config(section, variables, self, filename) if callback is not None: callback(self) """ This function is used to map the standard log level values from the string to the logging variable value """ def maploglevel(self, varname): import logging self.mapvalues(varname, { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR }, logging.DEBUG, True ) """ This function is used to map the values of a var to other values (e.g. strings to integer values) """ def mapvalues(self, varname, maps, default, lowercase = True): value = self.__dict__[varname] result = None found = False if lowercase: value = value.lower() for k, v in list(maps.items()): if value == k.lower(): result = v found = True break else: for k, v in list(maps.items()): if value == k: result = v found = True break if found: self.__dict__[varname] = result else: self.__dict__[varname] = default """ These functions act as helpers to translate a 'character separated' list stored in a string, to a typed list * these methods are included because they are very common in configuration files """ @staticmethod def str2intlist(str_list, separator = ","): ar_list = [ x.strip() for x in str_list.split(separator) ] result = [] for v in ar_list: try: v = int(v) except: v = None if v is not None: result.append(v) return result @staticmethod def str2list(str_list, separator = ","): ar_list = [ x.strip() for x in str_list.split(separator) ] return ar_list @staticmethod def str2floatlist(str_list, separator = ","): ar_list = [ x.strip() for x in str_list.split(separator) ] result = [] for v in ar_list: try: v = float(v) except: v = None if v is not None: result.append(v) return result def existing_config_files(): """ Method that calculates all the configuration files that are valid, according to the 'set_paths' and other methods for this module. 
""" global _ETC_PATHS global _MAIN_CONFIG_FILE global _CONFIG_VAR_INCLUDE global _CONFIG_FILTER config_files = [] for possible in _ETC_PATHS: config_files = config_files + glob.glob("%s%s" % (possible, _MAIN_CONFIG_FILE)) if _CONFIG_VAR_INCLUDE != "": main_config = Configuration("general", { _CONFIG_VAR_INCLUDE:"" }, _MAIN_CONFIG_FILE) if main_config.CONFIG_DIR != "": for possible in _ETC_PATHS: config_files = config_files + glob.glob("%s%s/%s" % (possible, main_config.CONFIG_DIR, _CONFIG_FILTER)) return config_files
grycap/cpyutils
config.py
Python
gpl-3.0
9,000
import ast

from wtforms.fields import Field
from wtforms.widgets import html_params


def slider_widget(field, ul_class='', **kwargs):
    """widget for rendering a SliderField"""
    # THE BELOW IS JUST AN EXAMPLE, NOTHING TO DO WITH SLIDER
    kwargs.setdefault('type', 'checkbox')
    field_id = kwargs.pop('id', field.id)
    html = [u'<ul %s>' % html_params(id=field_id, class_=ul_class)]
    for value, label, checked in field.iter_choices():
        choice_id = u'%s-%s' % (field_id, value)
        options = dict(kwargs, name=field.name, value=value, id=choice_id)
        if checked:
            options['checked'] = 'checked'
        html.append(u'<li><input %s /> ' % html_params(**options))
        html.append(u'<label %s>%s</label></li>' % (html_params(for_=choice_id), label))
    html.append(u'</ul>')
    return u''.join(html)


class SliderField(Field):
    widget = slider_widget

    def __init__(self, id, min, max, value):
        self.id = id
        self.min = min
        self.max = max
        self.value = value

    def _value(self):
        return self.data

    def process_formdata(self, input):
        self.data = ast.literal_eval(input[0])
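# ---------------------------------------------------------------------------
# Hedged usage sketch (the field name is illustrative; note that this
# SliderField takes (id, min, max, value) rather than the usual WTForms
# (label, validators) signature):
#
#   slider = SliderField('opacity', min=0, max=100, value=50)
#
# process_formdata() literal-evals the first posted value, so the client is
# expected to post a Python-literal-compatible number.
# ---------------------------------------------------------------------------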
dgrtwo/gleam
src/gleam/fields.py
Python
mit
1,097
from django.conf import settings from menus.exceptions import NamespaceAllreadyRegistered from django.contrib.sites.models import Site from django.core.cache import cache from django.utils.translation import get_language import copy def lex_cache_key(key): """ Returns the language and site ID a cache key is related to. """ return key.rsplit('_', 2)[1:] class MenuPool(object): def __init__(self): self.menus = {} self.modifiers = [] self.discovered = False self.cache_keys = set() def discover_menus(self): if self.discovered: return for app in settings.INSTALLED_APPS: __import__(app, {}, {}, ['menu']) from menus.modifiers import register register() self.discovered = True def clear(self, site_id=None, language=None): def relevance_test(keylang, keysite): sok = not site_id lok = not language if site_id and (site_id == keysite or site_id == int(keysite)): sok = True if language and language == keylang: lok = True return lok and sok to_be_deleted = [] for key in self.cache_keys: keylang, keysite = lex_cache_key(key) if relevance_test(keylang, keysite): to_be_deleted.append(key) cache.delete_many(to_be_deleted) self.cache_keys.difference_update(to_be_deleted) def register_menu(self, menu): from menus.base import Menu assert issubclass(menu, Menu) if menu.__name__ in self.menus.keys(): raise NamespaceAllreadyRegistered, "[%s] a menu with this name is already registered" % menu.__name__ self.menus[menu.__name__] = menu() def register_modifier(self, modifier_class): from menus.base import Modifier assert issubclass(modifier_class, Modifier) if not modifier_class in self.modifiers: self.modifiers.append(modifier_class) def _build_nodes(self, request, site_id): lang = get_language() prefix = getattr(settings, "CMS_CACHE_PREFIX", "menu_cache_") key = "%smenu_nodes_%s_%s" % (prefix, lang, site_id) self.cache_keys.add(key) cached_nodes = cache.get(key, None) if cached_nodes: return cached_nodes final_nodes = [] for ns in self.menus: try: nodes = self.menus[ns].get_nodes(request) except: raise last = None for node in nodes: if not node.namespace: node.namespace = ns if node.parent_id: if not node.parent_namespace: node.parent_namespace = ns found = False if last: n = last while n: if n.namespace == node.namespace and n.id == node.parent_id: node.parent = n found = True n = None elif n.parent: n = n.parent else: n = None if not found: for n in nodes: if n.namespace == node.namespace and n.id == node.parent_id: node.parent = n found = True if found: node.parent.children.append(node) else: continue final_nodes.append(node) last = node duration = getattr(settings, "MENU_CACHE_DURATION", 60*60) cache.set(key, final_nodes, duration) return final_nodes def apply_modifiers(self, nodes, request, namespace=None, root_id=None, post_cut=False, breadcrumb=False): if not post_cut: nodes = self._mark_selected(request, nodes) for cls in self.modifiers: inst = cls() nodes = inst.modify(request, nodes, namespace, root_id, post_cut, breadcrumb) return nodes def get_nodes(self, request, namespace=None, root_id=None, site_id=None, breadcrumb=False): self.discover_menus() if not site_id: site_id = Site.objects.get_current().pk nodes = self._build_nodes(request, site_id) nodes = copy.deepcopy(nodes) nodes = self.apply_modifiers(nodes, request, namespace, root_id, post_cut=False, breadcrumb=breadcrumb) return nodes def _mark_selected(self, request, nodes): sel = None for node in nodes: node.sibling = False node.ancestor = False node.descendant = False node.selected = False if node.get_absolute_url() == 
request.path[:len(node.get_absolute_url())]: if sel: if len(node.get_absolute_url()) > len(sel.get_absolute_url()): sel = node else: sel = node else: node.selected = False if sel: sel.selected = True return nodes def get_menus_by_attribute(self, name, value): self.discover_menus() found = [] for menu in self.menus.items(): if hasattr(menu[1], name) and getattr(menu[1], name, None) == value: found.append((menu[0], menu[1].name)) return found def get_nodes_by_attribute(self, nodes, name, value): found = [] for node in nodes: if node.attr.get(name, None) == value: found.append(node) return found menu_pool = MenuPool()
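# ---------------------------------------------------------------------------
# Hedged registration sketch (Menu and NavigationNode live in menus.base, as
# imported by register_menu() above; the concrete menu below is illustrative):
#
#   from menus.base import Menu, NavigationNode
#
#   class PagesMenu(Menu):
#       def get_nodes(self, request):
#           return [NavigationNode('Home', '/', 1)]
#
#   menu_pool.register_menu(PagesMenu)
#
# register_menu() asserts the class is a Menu subclass and instantiates it,
# keyed by class name, so each registered menu class name must be unique.
# ---------------------------------------------------------------------------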
dibaunaumh/tikal-corp-website
menus/menu_pool.py
Python
bsd-3-clause
5,979
#!/usr/bin/env python3

import argparse

from config import Configure
from config.api import IntranetAPI
from profiling import Context, Profile

if __name__ == "__main__":
    conf = Configure()
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output', type=str)
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('-u', '--user', type=str)
    parser.add_argument('-a', '--auth', type=str)
    parser.add_argument("-d", "--display-options", nargs='+', action='append',
                        help='display_options element (default: all display)\n'
                             'options:'
                             ' context'
                             ' email'
                             ' grade_stats'
                             ' module_collection')
    parser.add_argument('-m', '--modules', metavar='ModuleCode', type=str, nargs='+',
                        help='one or more module codes to inspect')
    args = parser.parse_args()

    if args.auth is not None:
        print("Save new auto login: " + args.auth)
        conf.set_auto_login(args.auth)

    api = IntranetAPI(conf)
    context = Context(api, {'verbose': args.verbose})
    profile = Profile(context, args.user)
    profile.collect()
    profile.display(args.display_options[0] if args.display_options is not None else None)
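# ---------------------------------------------------------------------------
# Hedged invocation sketch (the login and autologin values are placeholders;
# valid display options are the ones listed in the --display-options help):
#
#   python3 application.py -u login_x -a <autologin-url> -d context email
# ---------------------------------------------------------------------------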
IniterWorker/epitech-stats-notes
application.py
Python
mit
1,390
# -*- coding: utf-8 -*- # # SecureDrop whistleblower submission system # Copyright (C) 2017 Loic Dachary <loic@dachary.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import os import re from db import db import i18n import i18n_tool import journalist_app as journalist_app_module import pytest import source_app from flask import render_template from flask import render_template_string from flask import request from flask import session from flask_babel import gettext from sdconfig import SDConfig from sh import pybabel from sh import sed from .utils.env import TESTS_DIR from werkzeug.datastructures import Headers os.environ['SECUREDROP_ENV'] = 'test' # noqa def verify_i18n(app): not_translated = 'code hello i18n' translated_fr = 'code bonjour' for accepted in ('unknown', 'en_US'): headers = Headers([('Accept-Language', accepted)]) with app.test_request_context(headers=headers): assert not hasattr(request, 'babel_locale') assert not_translated == gettext(not_translated) assert hasattr(request, 'babel_locale') assert render_template_string(''' {{ gettext('code hello i18n') }} ''').strip() == not_translated for lang in ('fr_FR', 'fr', 'fr-FR'): headers = Headers([('Accept-Language', lang)]) with app.test_request_context(headers=headers): assert not hasattr(request, 'babel_locale') assert translated_fr == gettext(not_translated) assert hasattr(request, 'babel_locale') assert render_template_string(''' {{ gettext('code hello i18n') }} ''').strip() == translated_fr # https://github.com/freedomofpress/securedrop/issues/2379 headers = Headers([('Accept-Language', 'en-US;q=0.6,fr_FR;q=0.4,nb_NO;q=0.2')]) with app.test_request_context(headers=headers): assert not hasattr(request, 'babel_locale') assert not_translated == gettext(not_translated) translated_cn = 'code chinese' for lang in ('zh-CN', 'zh-Hans-CN'): headers = Headers([('Accept-Language', lang)]) with app.test_request_context(headers=headers): assert not hasattr(request, 'babel_locale') assert translated_cn == gettext(not_translated) assert hasattr(request, 'babel_locale') assert render_template_string(''' {{ gettext('code hello i18n') }} ''').strip() == translated_cn translated_ar = 'code arabic' for lang in ('ar', 'ar-kw'): headers = Headers([('Accept-Language', lang)]) with app.test_request_context(headers=headers): assert not hasattr(request, 'babel_locale') assert translated_ar == gettext(not_translated) assert hasattr(request, 'babel_locale') assert render_template_string(''' {{ gettext('code hello i18n') }} ''').strip() == translated_ar with app.test_client() as c: page = c.get('/login') assert session.get('locale') is None assert not_translated == gettext(not_translated) assert b'?l=fr_FR' in page.data assert b'?l=en_US' not in page.data page = c.get('/login?l=fr_FR', headers=Headers([('Accept-Language', 'en_US')])) assert session.get('locale') == 'fr_FR' assert translated_fr == gettext(not_translated) assert b'?l=fr_FR' not in 
page.data assert b'?l=en_US' in page.data c.get('/', headers=Headers([('Accept-Language', 'en_US')])) assert session.get('locale') == 'fr_FR' assert translated_fr == gettext(not_translated) c.get('/?l=') assert session.get('locale') is None assert not_translated == gettext(not_translated) c.get('/?l=en_US', headers=Headers([('Accept-Language', 'fr_FR')])) assert session.get('locale') == 'en_US' assert not_translated == gettext(not_translated) c.get('/', headers=Headers([('Accept-Language', 'fr_FR')])) assert session.get('locale') == 'en_US' assert not_translated == gettext(not_translated) c.get('/?l=', headers=Headers([('Accept-Language', 'fr_FR')])) assert session.get('locale') is None assert translated_fr == gettext(not_translated) c.get('/') assert session.get('locale') is None assert not_translated == gettext(not_translated) c.get('/?l=YY_ZZ') assert session.get('locale') is None assert not_translated == gettext(not_translated) with app.test_request_context(): assert '' == render_template('locales.html') with app.test_client() as c: c.get('/') locales = render_template('locales.html') assert '?l=fr_FR' in locales assert '?l=en_US' not in locales c.get('/?l=ar') base = render_template('base.html') assert 'dir="rtl"' in base # the canonical locale name is norsk bokmål but # this is overriden with just norsk by i18n.NAME_OVERRIDES with app.test_client() as c: c.get('/?l=nb_NO') base = render_template('base.html') assert 'norsk' in base assert 'norsk bo' not in base def test_get_supported_locales(): locales = ['en_US', 'fr_FR'] assert ['en_US'] == i18n._get_supported_locales( locales, None, None, None) locales = ['en_US', 'fr_FR'] supported = ['en_US', 'not_found'] with pytest.raises(i18n.LocaleNotFound) as excinfo: i18n._get_supported_locales(locales, supported, None, None) assert "contains ['not_found']" in str(excinfo.value) supported = ['fr_FR'] locale = 'not_found' with pytest.raises(i18n.LocaleNotFound) as excinfo: i18n._get_supported_locales(locales, supported, locale, None) assert "DEFAULT_LOCALE 'not_found'" in str(excinfo.value) # Grab the journalist_app fixture to trigger creation of resources def test_i18n(journalist_app, config): # Then delete it because using it won't test what we want del journalist_app sources = [ os.path.join(TESTS_DIR, 'i18n/code.py'), os.path.join(TESTS_DIR, 'i18n/template.html'), ] i18n_tool.I18NTool().main([ '--verbose', 'translate-messages', '--mapping', os.path.join(TESTS_DIR, 'i18n/babel.cfg'), '--translations-dir', config.TEMP_DIR, '--sources', ",".join(sources), '--extract-update', ]) pot = os.path.join(config.TEMP_DIR, 'messages.pot') pybabel('init', '-i', pot, '-d', config.TEMP_DIR, '-l', 'en_US') for (l, s) in (('fr_FR', 'code bonjour'), ('zh_Hans_CN', 'code chinese'), ('ar', 'code arabic'), ('nb_NO', 'code norwegian'), ('es_ES', 'code spanish')): pybabel('init', '-i', pot, '-d', config.TEMP_DIR, '-l', l) po = os.path.join(config.TEMP_DIR, l, 'LC_MESSAGES/messages.po') sed('-i', '-e', '/code hello i18n/,+1s/msgstr ""/msgstr "{}"/'.format(s), po) i18n_tool.I18NTool().main([ '--verbose', 'translate-messages', '--translations-dir', config.TEMP_DIR, '--compile', ]) fake_config = SDConfig() fake_config.SUPPORTED_LOCALES = [ 'en_US', 'fr_FR', 'zh_Hans_CN', 'ar', 'nb_NO'] fake_config.TRANSLATION_DIRS = config.TEMP_DIR # Use our config (and not an app fixture) because the i18n module # grabs values at init time and we can't inject them later. 
for app in (journalist_app_module.create_app(fake_config), source_app.create_app(fake_config)): with app.app_context(): db.create_all() assert i18n.LOCALES == fake_config.SUPPORTED_LOCALES verify_i18n(app) def test_verify_default_locale_en_us_if_not_defined_in_config(config): class Config: def __getattr__(self, name): if name == 'DEFAULT_LOCALE': raise AttributeError() return getattr(config, name) not_translated = 'code hello i18n' with source_app.create_app(Config()).test_client() as c: c.get('/') assert not_translated == gettext(not_translated) def test_locale_to_rfc_5646(): assert i18n.locale_to_rfc_5646('en') == 'en' assert i18n.locale_to_rfc_5646('en-US') == 'en' assert i18n.locale_to_rfc_5646('en_US') == 'en' assert i18n.locale_to_rfc_5646('en-us') == 'en' assert i18n.locale_to_rfc_5646('zh-hant') == 'zh-Hant' # Grab the journalist_app fixture to trigger creation of resources def test_html_en_lang_correct(journalist_app, config): # Then delete it because using it won't test what we want del journalist_app app = journalist_app_module.create_app(config).test_client() resp = app.get('/', follow_redirects=True) html = resp.data.decode('utf-8') assert re.compile('<html .*lang="en".*>').search(html), html app = source_app.create_app(config).test_client() resp = app.get('/', follow_redirects=True) html = resp.data.decode('utf-8') assert re.compile('<html .*lang="en".*>').search(html), html # check '/generate' too because '/' uses a different template resp = app.get('/generate', follow_redirects=True) html = resp.data.decode('utf-8') assert re.compile('<html .*lang="en".*>').search(html), html # Grab the journalist_app fixture to trigger creation of resources def test_html_fr_lang_correct(journalist_app, config): """Check that when the locale is fr_FR the lang property is correct""" # Then delete it because using it won't test what we want del journalist_app config.SUPPORTED_LOCALES = ['fr_FR', 'en_US'] app = journalist_app_module.create_app(config).test_client() resp = app.get('/?l=fr_FR', follow_redirects=True) html = resp.data.decode('utf-8') assert re.compile('<html .*lang="fr".*>').search(html), html app = source_app.create_app(config).test_client() resp = app.get('/?l=fr_FR', follow_redirects=True) html = resp.data.decode('utf-8') assert re.compile('<html .*lang="fr".*>').search(html), html # check '/generate' too because '/' uses a different template resp = app.get('/generate?l=fr_FR', follow_redirects=True) html = resp.data.decode('utf-8') assert re.compile('<html .*lang="fr".*>').search(html), html
ehartsuyker/securedrop
securedrop/tests/test_i18n.py
Python
agpl-3.0
11,053
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2014, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ MonitorMixinBase class used in monitor mixin framework. Using a monitor mixin with your algorithm ----------------------------------------- 1. Create a subclass of your algorithm class, with the first parent being the corresponding Monitor class. For example, class MonitoredTemporalMemory(TemporalMemoryMonitorMixin, TemporalMemory): pass 2. Create an instance of the monitored class and use that. instance = MonitoredTemporalMemory() # Run data through instance 3. Now you can call the following methods to print monitored data from of your instance: - instance.mmPrettyPrintMetrics(instance.mmGetDefaultMetrics()) - instance.mmPrettyPrintTraces(instance.mmGetDefaultTraces()) Each specific monitor also has specific methods you can call to extract data out of it. Adding data to a monitor mixin ----------------------------------------- 1. Create a variable for the data you want to capture in your specific monitor's `mmClearHistory` method. For example, self._mmTraces["predictedCells"] = IndicesTrace(self, "predicted cells") Make sure you use the correct type of trace for your data. 2. Add data to this trace in your algorithm's `compute` method (or anywhere else). self._mmTraces["predictedCells"].data.append(set(self.getPredictiveCells())) 3. You can optionally add this trace as a default trace in `mmGetDefaultTraces`, or define a function to return that trace: def mmGetTracePredictiveCells(self): Any trace can be converted to a metric using the utility functions provided in the framework (see `metric.py`). Extending the functionality of the monitor mixin framework ----------------------------------------- If you want to add new types of traces and metrics, add them to `trace.py` and `metric.py`. You can also create new monitors by simply defining new classes that inherit from MonitorMixinBase. """ import abc import numpy from prettytable import PrettyTable from nupic.research.monitor_mixin.plot import Plot class MonitorMixinBase(object): """ Base class for MonitorMixin. Each subclass will be a mixin for a particular algorithm. All arguments, variables, and methods in monitor mixin classes should be prefixed with "mm" (to avoid collision with the classes they mix in to). """ __metaclass__ = abc.ABCMeta def __init__(self, *args, **kwargs): """ Note: If you set the kwarg "mmName", then pretty-printing of traces and metrics will include the name you specify as a tag before every title. 
""" self.mmName = kwargs.get("mmName") if "mmName" in kwargs: del kwargs["mmName"] super(MonitorMixinBase, self).__init__(*args, **kwargs) # Mapping from key (string) => trace (Trace) self._mmTraces = None self._mmData = None self.mmClearHistory() def mmClearHistory(self): """ Clears the stored history. """ self._mmTraces = {} self._mmData = {} @staticmethod def mmPrettyPrintTraces(traces, breakOnResets=None): """ Returns pretty-printed table of traces. @param traces (list) Traces to print in table @param breakOnResets (BoolsTrace) Trace of resets to break table on @return (string) Pretty-printed table of traces. """ assert len(traces) > 0, "No traces found" table = PrettyTable(["#"] + [trace.prettyPrintTitle() for trace in traces]) for i in xrange(len(traces[0].data)): if breakOnResets and breakOnResets.data[i]: table.add_row(["<reset>"] * (len(traces) + 1)) table.add_row([i] + [trace.prettyPrintDatum(trace.data[i]) for trace in traces]) return table.get_string().encode("utf-8") @staticmethod def mmPrettyPrintMetrics(metrics, sigFigs=5): """ Returns pretty-printed table of metrics. @param metrics (list) Traces to print in table @param sigFigs (int) Number of significant figures to print @return (string) Pretty-printed table of metrics. """ assert len(metrics) > 0, "No metrics found" table = PrettyTable(["Metric", "mean", "standard deviation", "min", "max", "sum", ]) for metric in metrics: table.add_row([metric.prettyPrintTitle()] + metric.getStats()) return table.get_string().encode("utf-8") def mmGetDefaultTraces(self, verbosity=1): """ Returns list of default traces. (To be overridden.) @param verbosity (int) Verbosity level @return (list) Default traces """ return [] def mmGetDefaultMetrics(self, verbosity=1): """ Returns list of default metrics. (To be overridden.) @param verbosity (int) Verbosity level @return (list) Default metrics """ return [] def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="", showReset=False, resetShading=0.25): """ Returns plot of the cell activity. Note that if many timesteps of activities are input, matplotlib's image interpolation may omit activities (columns in the image). @param cellTrace (list) a temporally ordered list of sets of cell activities @param cellCount (int) number of cells in the space being rendered @param activityType (string) type of cell activity being displayed @param title (string) an optional title for the figure @param showReset (bool) if true, the first set of cell activities after a reset will have a grayscale background @param resetShading (float) applicable if showReset is true, specifies the intensity of the reset background with 0.0 being white and 1.0 being black @return (Plot) plot """ plot = Plot(self, title) resetTrace = self.mmGetTraceResets().data data = numpy.zeros((cellCount, 1)) for i in xrange(len(cellTrace)): # Set up a "background" vector that is shaded or blank if showReset and resetTrace[i]: activity = numpy.ones((cellCount, 1)) * resetShading else: activity = numpy.zeros((cellCount, 1)) activeIndices = cellTrace[i] activity[list(activeIndices)] = 1 data = numpy.concatenate((data, activity), 1) plot.add2DArray(data, xlabel="Time", ylabel=activityType, name=title) return plot
blueburningcoder/nupic
src/nupic/research/monitor_mixin/monitor_mixin_base.py
Python
agpl-3.0
7,350
import serial
import time, os
import traceback

gpsPort = None
currentSentence = ""
debug = False

def begin():
    global gpsPort
    #Open serial port
    gpsPort = serial.Serial("/dev/ttyS1",9600,timeout=3)

def getSentence(code, timeout):
    global gpsPort, debug, currentSentence
    startTime = time.time()
    while((time.time() - startTime) < timeout):
        try:
            s = gpsPort.readline()
            if(len(s) > 10):
                if(s[:6] == code):
                    #Only interested in position updates for now
                    if(debug):
                        print "Got " + code + ": " + s
                    with open(os.environ['DIR'] + '/gps.log', 'a') as f:
                        f.write("Received Sentence: " + s + "\n")
                    return s
        except KeyboardInterrupt:
            exit(0)
        except:
            try:
                #Log exception for failure analysis
                with open(os.environ['DIR'] + '/gps.log', 'a') as f:
                    f.write("+++++ EXCEPTION +++++\n")
                    f.write(traceback.format_exc() + "\n")
                if(debug):
                    print traceback.format_exc()
            except:
                pass #Just in case there is an exception in the exception handler...
            return ""
    return ""

#begin()
def main():
    global debug
    debug = True
    begin()
    while(True):
        getSentence("$GPGGA",3)

if __name__ == "__main__":
    main()
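# Illustrative note (assumption, not part of the original source): NMEA
# sentences are comma-separated ASCII lines; a typical $GPGGA fix looks like
#
#   $GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47
#
# so a caller would normally split the raw line returned by getSentence:
#
#   fields = getSentence("$GPGGA", 3).split(",")
#   if len(fields) > 5:
#       lat, lon = fields[2] + fields[3], fields[4] + fields[5]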
daveshah1/nova
rover/gps.py
Python
gpl-2.0
1,475
import unittest from pyramid import testing from pytest import fixture from pytest import mark class ConfigViewTest(unittest.TestCase): def call_fut(self, request): from adhocracy_frontend import config_view return config_view(request) def test_with_empty_settings(self): request = testing.DummyRequest(scheme='http') request.registry.settings = None assert self.call_fut(request) == \ {'ws_url': 'ws://example.com:6561', 'pkg_path': '/static/js/Packages', 'rest_url': 'http://localhost:6541', 'rest_platform_path': '/adhocracy/', 'trusted_domains': [], 'support_email': 'support@unconfigured.domain', 'locale': 'en', 'site_name': 'Adhocracy', 'canonical_url': 'http://localhost:6551', 'custom': {}, 'debug': False, 'piwik_enabled': False, 'piwik_host': None, 'piwik_site_id': None, 'piwik_track_user_id': False, 'piwik_use_cookies': False, 'terms_url': None} def test_ws_url_without_ws_url_settings_scheme_https(self): request = testing.DummyRequest(scheme='https') request.registry.settings = None assert self.call_fut(request)['ws_url'] == 'wss://example.com:6561' def test_ws_url_with_ws_url_settings(self): request = testing.DummyRequest(scheme='http') request.registry.settings = {'adhocracy.frontend.ws_url': 'ws://l.x'} assert self.call_fut(request)['ws_url'] == 'ws://l.x' def test_pkg_path_with_pkg_path_settings(self): request = testing.DummyRequest(scheme='http') request.registry.settings = {'adhocracy.frontend.pkg_path': '/t'} assert self.call_fut(request)['pkg_path'] == '/t' def test_root_path_with_platform_settings(self): request = testing.DummyRequest(scheme='http') request.registry.settings = {'adhocracy.rest_platform_path': '/adhocracy2/'} assert self.call_fut(request)['rest_platform_path'] == '/adhocracy2/' def test_root_path_with_rest_url_settings(self): request = testing.DummyRequest(scheme='http') request.registry.settings = {'adhocracy.frontend.rest_url': 'x.org'} assert self.call_fut(request)['rest_url'] == 'x.org' def test_support_email_with_support_email_settings(self): request = testing.DummyRequest(scheme='http') request.registry.settings = { 'adhocracy.frontend.support_email': 'x.org' } assert self.call_fut(request)['support_email'] == 'x.org' class RootViewTest(unittest.TestCase): def call_fut(self, request): from adhocracy_frontend import root_view return root_view(request) def test_call_and_root_html_exists(self): request = testing.DummyRequest(scheme='https') resp = self.call_fut(request) assert resp.status_code == 200 assert resp.body_file class ViewsFunctionalTest(unittest.TestCase): def setUp(self): from adhocracy_frontend import main from webtest import TestApp app = main({}) self.testapp = TestApp(app) @mark.xfail(reason='asset build:/stylesheets/a3.css must exists') def test_static_view(self): resp = self.testapp.get('/static/root.html', status=200) assert '200' in resp.status def test_config_json_view(self): resp = self.testapp.get('/config.json', status=200) assert '200' in resp.status @mark.xfail(reason='asset build:/stylesheets/a3.css must exists') def test_embed_view(self): resp = self.testapp.get('/embed/XX', status=200) assert '200' in resp.status @mark.xfail(reason='asset build:/stylesheets/a3.css must exists') def test_register_view(self): resp = self.testapp.get('/register', status=200) assert '200' in resp.status @fixture() def integration(config): config.include('adhocracy_frontend') @mark.usefixtures('integration') class TestIntegrationIncludeme: def test_includeme(self): """Check that includeme runs without errors.""" assert True def 
test_register_subscriber(self, registry): from . import add_cors_headers handlers = [x.handler.__name__ for x in registry.registeredHandlers()] assert add_cors_headers.__name__ in handlers
fhartwig/adhocracy3.mercator
src/adhocracy_frontend/adhocracy_frontend/test_init_.py
Python
agpl-3.0
4,475
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A library of common shape functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import tensor_shape def scalar_shape(unused_op): """Shape function for ops that output a scalar value.""" return [tensor_shape.scalar()] def unchanged_shape(op): """Shape function for ops that output an tensor like their first input.""" return [op.inputs[0].get_shape()] def unchanged_shape_with_rank(rank): """Returns a shape function for ops that constrain the rank of their input. Args: rank: The exact rank of the input and output. Returns: A shape function for ops that output a tensor of the same size as their input, with a particular rank. """ def _ShapeFunction(op): return [op.inputs[0].get_shape().with_rank(rank)] return _ShapeFunction def unchanged_shape_with_rank_at_least(rank): """Returns a shape function for ops that constrain the rank of their input. Args: rank: A lower bound on the rank of the input and output. Returns: A shape function for ops that output a tensor of the same size as their input, with a particular rank. """ def _ShapeFunction(op): return [op.inputs[0].get_shape().with_rank_at_least(rank)] return _ShapeFunction def unchanged_shape_with_rank_at_most(rank): """Returns a shape function for ops that constrain the rank of their input. Args: rank: An upper bound on the rank of the input and output. Returns: A shape function for ops that output a tensor of the same size as their input, with a particular rank. """ def _ShapeFunction(op): return [op.inputs[0].get_shape().with_rank_at_most(rank)] return _ShapeFunction def matmul_shape(op): """Shape function for a MatMul op.""" a_shape = op.inputs[0].get_shape().with_rank(2) transpose_a = op.get_attr("transpose_a") b_shape = op.inputs[1].get_shape().with_rank(2) transpose_b = op.get_attr("transpose_b") output_rows = a_shape[1] if transpose_a else a_shape[0] output_cols = b_shape[0] if transpose_b else b_shape[1] inner_a = a_shape[0] if transpose_a else a_shape[1] inner_b = b_shape[1] if transpose_b else b_shape[0] inner_a.assert_is_compatible_with(inner_b) return [tensor_shape.TensorShape([output_rows, output_cols])] def bias_add_shape(op): """Shape function for a BiasAdd op.""" input_shape = op.inputs[0].get_shape().with_rank_at_least(2) bias_shape = op.inputs[1].get_shape().with_rank(1) if input_shape.ndims is not None: # Output has the same shape as input, and matches the length of # bias in its last dimension. 
output_shape = input_shape[0:-1].concatenate( input_shape[-1].merge_with(bias_shape[0])) else: output_shape = tensor_shape.unknown_shape() return [output_shape] def get2d_conv_output_size(input_height, input_width, filter_height, filter_width, row_stride, col_stride, padding_type): """Returns the number of rows and columns in a convolution/pooling output.""" input_height = tensor_shape.as_dimension(input_height) input_width = tensor_shape.as_dimension(input_width) filter_height = tensor_shape.as_dimension(filter_height) filter_width = tensor_shape.as_dimension(filter_width) row_stride = int(row_stride) col_stride = int(col_stride) if filter_height.value == 1 and filter_width.value == 1 and ( row_stride == 1 and col_stride == 1): return input_height, input_width else: if filter_height > input_height or filter_width > input_width: raise ValueError( "filter must not be larger than the input: " "Filter: [%sx%s] Input: [%sx%s]" % (filter_height, filter_width, input_height, input_width)) if row_stride > filter_height or col_stride > filter_width: raise ValueError("stride must be less than or equal to filter size", "stride: [%sx%s] filter: [%sx%s]" % (row_stride, col_stride, filter_height, filter_width)) # Compute number of rows in the output, based on the padding. if input_height.value is None or filter_height.value is None: out_rows = None elif padding_type == b"VALID": out_rows = ((input_height.value - filter_height.value + row_stride) // row_stride) elif padding_type == b"SAME": out_rows = (input_height.value + row_stride - 1) // row_stride else: raise ValueError("Invalid value for padding: %r" % padding_type) # Compute number of columns in the output, based on the padding. if input_width.value is None or filter_width.value is None: out_cols = None elif padding_type == b"VALID": out_cols = ((input_width.value - filter_width.value + col_stride) // col_stride) elif padding_type == b"SAME": out_cols = (input_width.value + col_stride - 1) // col_stride return out_rows, out_cols def conv2d_shape(op): """Shape function for a Conv2D op. This op has two inputs: * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in] * filter, a 4D tensor with shape = [filter_rows, filter_cols, depth_in, depth_out] The output is a 4D tensor with shape = [batch_size, out_rows, out_cols, depth_out], where out_rows and out_cols depend on the value of the op's "padding" and "strides" attrs. Args: op: A Conv2D Operation. Returns: A list containing the Shape of the Conv2D output. Raises: ValueError: If the shapes of the input or filter are incompatible. """ input_shape = op.inputs[0].get_shape().with_rank(4) filter_shape = op.inputs[1].get_shape().with_rank(4) try: data_format = op.get_attr("data_format") except ValueError: data_format = None if data_format == "NCHW": # Convert input shape to the dfeault NHWC for inference. input_shape = [input_shape[0], input_shape[2], input_shape[3], input_shape[1]] batch_size = input_shape[0] in_rows = input_shape[1] in_cols = input_shape[2] filter_rows = filter_shape[0] filter_cols = filter_shape[1] depth_out = filter_shape[3] # Check that the input depths are compatible. 
input_shape[3].assert_is_compatible_with(filter_shape[2]) if data_format == "NCHW": stride_b, stride_d, stride_r, stride_c = op.get_attr("strides") else: stride_b, stride_r, stride_c, stride_d = op.get_attr("strides") if stride_b != 1 or stride_d != 1: raise ValueError("Current implementation does not yet support " "strides in the batch and depth dimensions.") if stride_r != stride_c: # TODO(shlens): Add support for this. raise ValueError("Current implementation only supports equal length " "strides in the row and column dimensions.") # TODO(mrry,shlens): Raise an error if the stride would cause # information in the input to be ignored. This will require a change # in the kernel implementation. stride = stride_r padding = op.get_attr("padding") out_rows, out_cols = get2d_conv_output_size( in_rows, in_cols, filter_rows, filter_cols, stride, stride, padding) output_shape = [batch_size, out_rows, out_cols, depth_out] if data_format == "NCHW": # Convert output shape back to NCHW. output_shape = [output_shape[0], output_shape[3], output_shape[1], output_shape[2]] return [tensor_shape.TensorShape(output_shape)] def separable_conv2d_shape(op): """Shape function for a SeparableConv2D op. This op has three inputs: * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in] * depthwise_filter, a 4D tensor with shape = [filter_rows, filter_cols, depth_in, depth_multiplier] * pointwise_filter, a 4D tensor with shape = [1, 1, depth_in * depth_multiplier, depth_out] The output is a 4D tensor with shape = [batch_size, out_rows, out_cols, depth_out], where out_rows and out_cols depend on the value of the op's "padding" and "strides" attrs. Args: op: A SeparableConv2D Operation. Returns: A list containing the Shape of the SeparableConv2D output. Raises: ValueError: If the shapes of the input or filter are incompatible. """ input_shape = op.inputs[0].get_shape().with_rank(4) depthwise_filter_shape = op.inputs[1].get_shape().merge_with( tensor_shape.TensorShape([None, None, input_shape[3], None])) pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3] pointwise_filter_shape = op.inputs[2].get_shape().merge_with( tensor_shape.TensorShape([1, 1, pointwise_depth_in, None])) batch_size = input_shape[0] in_rows = input_shape[1] in_cols = input_shape[2] filter_rows = depthwise_filter_shape[0] filter_cols = depthwise_filter_shape[1] depth_out = pointwise_filter_shape[3] stride_b, stride_r, stride_c, stride_d = op.get_attr("strides") if stride_b != 1 or stride_d != 1: raise ValueError("Current implementation does not yet support " "strides in the batch and depth dimensions.") if stride_r != stride_c: # TODO(shlens): Add support for this. raise ValueError("Current implementation only supports equal length " "strides in the row and column dimensions.") # TODO(mrry,shlens): Raise an error if the stride would cause # information in the input to be ignored. This will require a change # in the kernel implementation. stride = stride_r padding = op.get_attr("padding") out_rows, out_cols = get2d_conv_output_size( in_rows, in_cols, filter_rows, filter_cols, stride, stride, padding) return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])] def avg_pool_shape(op): """Shape function for an AvgPool op. This op has one input: * input, a 4D tensor with shape = [batch_size, rows, cols, depth] The output is a 4D tensor with shape = [batch_size, out_rows, out_cols, depth_out], where out_rows and out_cols depend on the value of the op's "ksize", "strides", and "padding" attrs. 
Args: op: An AvgPool Operation. Returns: A single-element list containing the Shape of the AvgPool output. Raises: ValueError: If the shape of the input is invalid or incompatible with the values of the attrs. """ input_shape = op.inputs[0].get_shape().with_rank(4) ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize") stride_b, stride_r, stride_c, stride_d = op.get_attr("strides") batch_size = input_shape[0] in_rows = input_shape[1] in_cols = input_shape[2] depth = input_shape[3] if ksize_b != 1 or ksize_d != 1: raise ValueError("Current implementation does not support pooling " "in the batch and depth dimensions.") if stride_b != 1 or stride_d != 1: raise ValueError("Current implementation does not support strides " "in the batch and depth dimensions.") # TODO(mrry,shlens): Raise an error if the stride would cause # information in the input to be ignored. This will require a change # in the kernel implementation. padding = op.get_attr("padding") out_rows, out_cols = get2d_conv_output_size( in_rows, in_cols, ksize_r, ksize_c, stride_r, stride_c, padding) return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth])] def max_pool_shape(op): """Shape function for a MaxPool op. This op has one input: * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in] The output is a 4D tensor with shape = [batch_size, out_rows, out_cols, depth_out], where out_rows, out_cols, and depth_out depend on the value of the op's "ksize", "strides", and "padding" attrs. Args: op: A MaxPool Operation. Returns: A single-element list containing the Shape of the MaxPool output. Raises: ValueError: If the shape of the input is invalid or incompatible with the values of the attrs. """ input_shape = op.inputs[0].get_shape().with_rank(4) ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize") stride_b, stride_r, stride_c, stride_d = op.get_attr("strides") batch_size = input_shape[0] in_rows = input_shape[1] in_cols = input_shape[2] depth = input_shape[3] if ksize_b != 1: raise ValueError("Current implementation does not support pooling " "in the batch dimension.") if stride_b != 1: raise ValueError("Current implementation does not support strides " "in the batch dimension.") if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1): raise ValueError("MaxPooling supports exactly one of pooling across depth " "or pooling across width/height.") # TODO(mrry,shlens): Raise an error if the stride would cause # information in the input to be ignored. This will require a change # in the kernel implementation. if ksize_d == 1: padding = op.get_attr("padding") out_rows, out_cols = get2d_conv_output_size( in_rows, in_cols, ksize_r, ksize_c, stride_r, stride_c, padding) return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth])] else: if depth % ksize_d > 0: raise ValueError("Depthwise max pooling requires the depth window " "to evenly divide the input depth.") if stride_d != ksize_d: raise ValueError("Depthwise max pooling requires the depth window " "to equal the depth stride.") return [tensor_shape.TensorShape([batch_size, in_rows, in_cols, depth // ksize_d])] def no_outputs(unused_op): """Shape function for use with ops that have no outputs.""" return [] def unknown_shape(op): """Shape function for use with ops whose output shapes are unknown.""" return [tensor_shape.unknown_shape() for _ in op.outputs]
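# Illustrative sketch (not part of the original module): the output-size
# helper above can be exercised directly. With a 28x28 input, a 5x5 filter
# and stride 1, "SAME" padding preserves the spatial size while "VALID"
# shrinks it by (filter_size - 1) in each dimension:
#
#   get2d_conv_output_size(28, 28, 5, 5, 1, 1, b"SAME")   # -> (28, 28)
#   get2d_conv_output_size(28, 28, 5, 5, 1, 1, b"VALID")  # -> (24, 24)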
DailyActie/Surrogate-Model
01-codes/tensorflow-master/tensorflow/python/ops/common_shapes.py
Python
mit
15,311
# coding: utf-8
from django.db import models
from django_th.models.services import Services
from django_th.models import TriggerService


class Joplin(Services):
    """
        Joplin service model: stores the Joplin folder used by a given
        trigger of the service.
    """
    folder = models.TextField()
    trigger = models.ForeignKey(TriggerService, on_delete=models.CASCADE)

    class Meta:
        app_label = 'th_joplin'
        db_table = 'django_th_joplin'

    def __str__(self):
        return self.name

    def show(self):
        return "My Joplin %s" % self.name
foxmask/django-th
th_joplin/models.py
Python
bsd-3-clause
539
# Exercise 3
import random

v1 = []
v2 = []
vI = []
c = 0

while c <= 9:
    num = random.randint(1,100)
    v1.append(num)
    vI.append(num)
    num = random.randint(1,100)
    v2.append(num)
    vI.append(num)
    c += 1

print("List 1 has the elements",v1)
print("List 2 has the elements",v2)
print("And the list holding all the elements is",vI)
M3nin0/supreme-broccoli
_Massanori_Lists/lista_4/exe_3.py
Python
apache-2.0
335
"""MessageHub producer. /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ """ import base64 import logging import math import os import ssl import sys import time import traceback from kafka import KafkaProducer from kafka.errors import NoBrokersAvailable, KafkaTimeoutError, AuthenticationFailedError from kafka.version import __version__ from random import shuffle logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(levelname)-8s %(asctime)s %(message)s', datefmt='[%H:%M:%S]') max_cached_producers = 10 def main(params): producer = None logging.info("Using kafka-python %s", str(__version__)) logging.info("Validating parameters") validationResult = validateParams(params) if validationResult[0] != True: return {'error': validationResult[1]} else: validatedParams = validationResult[1] attempt = 0 max_attempts = 3 result = {"success": True} while attempt < max_attempts: attempt += 1 logging.info("Starting attempt {}".format(attempt)) try: logging.info("Getting producer") # set a client timeout that allows for 3 connection retries while still # reserving 10s for the actual send producer_timeout_ms = math.floor(getRemainingTime(reservedTime=10) / max_attempts * 1000) producer = getProducer(validatedParams, producer_timeout_ms) topic = validatedParams['topic'] logging.info("Finding topic {}".format(topic)) partition_info = producer.partitions_for(topic) logging.info("Found topic {} with partition(s) {}".format(topic, partition_info)) break except Exception as e: if attempt == max_attempts: producer = None logging.warning(e) traceback.print_exc(limit=5) result = getResultForException(e) # we successfully connected and found the topic metadata... let's send! if producer is not None: try: logging.info("Producing message") # only use the key parameter if it is present value = validatedParams['value'] if 'key' in validatedParams: messageKey = validatedParams['key'] future = producer.send( topic, bytes(value, 'utf-8'), key=bytes(messageKey, 'utf-8')) else: future = producer.send(topic, bytes(value, 'utf-8')) # future should wait all of the remaining time future_time_seconds = math.floor(getRemainingTime()) sent = future.get(timeout=future_time_seconds) msg = "Successfully sent message to {}:{} at offset {}".format( sent.topic, sent.partition, sent.offset) logging.info(msg) result = {"success": True, "message": msg} except Exception as e: logging.warning(e) traceback.print_exc(limit=5) result = getResultForException(e) return result def getResultForException(e): if isinstance(e, KafkaTimeoutError): return {'error': 'Timed out communicating with Message Hub'} elif isinstance(e, AuthenticationFailedError): return {'error': 'Authentication failed'} elif isinstance(e, NoBrokersAvailable): return {'error': 'No brokers available. 
Check that your supplied brokers are correct and available.'} else: return {'error': '{}'.format(e)} def validateParams(params): validatedParams = params.copy() requiredParams = ['kafka_brokers_sasl', 'user', 'password', 'topic', 'value'] missingParams = [] for requiredParam in requiredParams: if requiredParam not in params: missingParams.append(requiredParam) if len(missingParams) > 0: return (False, "You must supply all of the following parameters: {}".format(', '.join(missingParams))) if isinstance(params['kafka_brokers_sasl'], str): # turn it into a List validatedParams['kafka_brokers_sasl'] = params['kafka_brokers_sasl'].split(',') shuffle(validatedParams['kafka_brokers_sasl']) if 'base64DecodeValue' in params and params['base64DecodeValue'] == True: try: validatedParams['value'] = base64.b64decode(params['value']).decode('utf-8') except: return (False, "value parameter is not Base64 encoded") if len(validatedParams['value']) == 0: return (False, "value parameter is not Base64 encoded") if 'base64DecodeKey' in params and params['base64DecodeKey'] == True: try: validatedParams['key'] = base64.b64decode(params['key']).decode('utf-8') except: return (False, "key parameter is not Base64 encoded") if len(validatedParams['key']) == 0: return (False, "key parameter is not Base64 encoded") return (True, validatedParams) def getProducer(validatedParams, timeout_ms): connectionHash = getConnectionHash(validatedParams) if globals().get("cached_producers") is None: logging.info("dictionary was None") globals()["cached_producers"] = dict() # remove arbitrary connection to make room for new one if len(globals()["cached_producers"]) == max_cached_producers: poppedProducer = globals()["cached_producers"].popitem()[1] poppedProducer.close(timeout=1) logging.info("Removed cached producer") if connectionHash not in globals()["cached_producers"]: logging.info("cache miss") # create a new connection sasl_mechanism = 'PLAIN' security_protocol = 'SASL_SSL' # Create a new context using system defaults, disable all but TLS1.2 context = ssl.create_default_context() context.options &= ssl.OP_NO_TLSv1 context.options &= ssl.OP_NO_TLSv1_1 producer = KafkaProducer( api_version=(0, 10), batch_size=0, bootstrap_servers=validatedParams['kafka_brokers_sasl'], max_block_ms=timeout_ms, request_timeout_ms=timeout_ms, sasl_plain_username=validatedParams['user'], sasl_plain_password=validatedParams['password'], security_protocol=security_protocol, ssl_context=context, sasl_mechanism=sasl_mechanism ) logging.info("Created producer") # store the producer globally for subsequent invocations globals()["cached_producers"][connectionHash] = producer # return it return producer else: logging.info("Reusing existing producer") return globals()["cached_producers"][connectionHash] def getConnectionHash(params): apiKey = "{}:{}".format(params['user'], params['password']) return apiKey # return the remaining time (in seconds) until the action will expire, # optionally reserving some time (also in seconds). def getRemainingTime(reservedTime=0): deadlineSeconds = int(os.getenv('__OW_DEADLINE', 60000)) / 1000 remaining = deadlineSeconds - time.time() - reservedTime # ensure value is at least zero # yes, this is a little paranoid return max(remaining, 0)
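# Illustrative sketch (assumption, not part of the original action): the
# deadline accounting above can be exercised locally by faking the
# OpenWhisk-provided __OW_DEADLINE value, which is a Unix timestamp in
# milliseconds:
#
#   os.environ['__OW_DEADLINE'] = str(int((time.time() + 30) * 1000))
#   getRemainingTime(reservedTime=10)   # -> roughly 20.0 (seconds)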
dubeejw/openwhisk-package-kafka
action/messageHubProduce.py
Python
apache-2.0
8,014
#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # ## ## Edit a properties config file and replace values based on ## the ENV variables ## export my-key=new-value ## ./apply-config-from-env file.conf ## import os, sys if len(sys.argv) < 2: print 'Usage: %s' % (sys.argv[0]) sys.exit(1) # Always apply env config to env scripts as well conf_files = ['conf/pulsar_env.sh', 'conf/bkenv.sh'] + sys.argv[1:] for conf_filename in conf_files: lines = [] # List of config file lines keys = {} # Map a key to its line number in the file # Load conf file for line in open(conf_filename): lines.append(line) line = line.strip() if not line or line.startswith('#'): continue k,v = line.split('=', 1) keys[k] = len(lines) - 1 # Update values from Env for k in sorted(os.environ.keys()): v = os.environ[k] if k in keys: print '[%s] Applying config %s = %s' % (conf_filename, k, v) idx = keys[k] lines[idx] = '%s=%s\n' % (k, v) # Store back the updated config in the same file f = open(conf_filename, 'w') for line in lines: f.write(line) f.close()
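## Example (illustrative; the key and values below are made up): given a
## line `brokerDeleteInactiveTopicsEnabled=true` in conf/broker.conf and
## `export brokerDeleteInactiveTopicsEnabled=false` in the environment,
## running `apply-config-from-env.py conf/broker.conf` rewrites that line to
## `brokerDeleteInactiveTopicsEnabled=false` (and applies the same pass to
## conf/pulsar_env.sh and conf/bkenv.sh, which are always included).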
yush1ga/pulsar
docker/scripts/apply-config-from-env.py
Python
apache-2.0
1,964
from coalib.bearlib.abstractions.Linter import linter from dependency_management.requirements.PipRequirement import PipRequirement @linter(executable='vint', output_format='regex', output_regex=r'.+:(?P<line>\d+):(?P<column>\d+): (?P<message>.+)') class VintBear: """ Check vimscript code for possible style problems. See <https://github.com/Kuniwak/vint> for more information. """ LANGUAGES = {'VimScript'} REQUIREMENTS = {PipRequirement('vim-vint', '0.3.12')} AUTHORS = {'The coala developers'} AUTHORS_EMAILS = {'coala-devel@googlegroups.com'} LICENSE = 'AGPL-3.0' CAN_DETECT = {'Formatting'} @staticmethod def create_arguments(filename, file, config_file): return filename,
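# Illustrative note (assumption, not taken from vint's documentation): the
# output_regex above expects one issue per line in the form
#
#   /path/to/plugin.vim:24:1: some style problem description
#
# from which the named groups yield line=24, column=1 and the message text.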
IPMITMO/statan
coala-bears/bears/vimscript/VintBear.py
Python
mit
747
import sys if sys.version_info < (3, 7): from ._symbolsrc import SymbolsrcValidator from ._symbol import SymbolValidator from ._sizesrc import SizesrcValidator from ._sizeref import SizerefValidator from ._sizemode import SizemodeValidator from ._sizemin import SizeminValidator from ._size import SizeValidator from ._showscale import ShowscaleValidator from ._reversescale import ReversescaleValidator from ._opacitysrc import OpacitysrcValidator from ._opacity import OpacityValidator from ._line import LineValidator from ._colorsrc import ColorsrcValidator from ._colorscale import ColorscaleValidator from ._colorbar import ColorbarValidator from ._coloraxis import ColoraxisValidator from ._color import ColorValidator from ._cmin import CminValidator from ._cmid import CmidValidator from ._cmax import CmaxValidator from ._cauto import CautoValidator from ._autocolorscale import AutocolorscaleValidator else: from _plotly_utils.importers import relative_import __all__, __getattr__, __dir__ = relative_import( __name__, [], [ "._symbolsrc.SymbolsrcValidator", "._symbol.SymbolValidator", "._sizesrc.SizesrcValidator", "._sizeref.SizerefValidator", "._sizemode.SizemodeValidator", "._sizemin.SizeminValidator", "._size.SizeValidator", "._showscale.ShowscaleValidator", "._reversescale.ReversescaleValidator", "._opacitysrc.OpacitysrcValidator", "._opacity.OpacityValidator", "._line.LineValidator", "._colorsrc.ColorsrcValidator", "._colorscale.ColorscaleValidator", "._colorbar.ColorbarValidator", "._coloraxis.ColoraxisValidator", "._color.ColorValidator", "._cmin.CminValidator", "._cmid.CmidValidator", "._cmax.CmaxValidator", "._cauto.CautoValidator", "._autocolorscale.AutocolorscaleValidator", ], )
plotly/python-api
packages/python/plotly/plotly/validators/splom/marker/__init__.py
Python
mit
2,113
# -*- coding: utf-8 -*-
# Copyright 2010-2014, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""A script to ensure 'import gyp' loads a gyp module from expected location.

Example:
./ensure_gyp_module_path.py --expected=/home/foobar/work/mozc/src/third_party/gyp/pylib/gyp
"""

__author__ = "yukawa"

import optparse
import os
import sys


def ParseOption():
  """Parse command line options."""
  parser = optparse.OptionParser()
  parser.add_option('--expected', dest='expected')

  (options, _) = parser.parse_args()
  if not options.expected:
    # print_help() writes the usage text itself; printing its (None) return
    # value would emit a stray "None" line.
    parser.print_help()
    sys.exit(1)

  return options


def main():
  """Script to ensure gyp module location."""
  opt = ParseOption()
  expected_path = os.path.abspath(opt.expected)
  if not os.path.exists(expected_path):
    print '%s does not exist.' % expected_path
    sys.exit(1)

  try:
    import gyp  # NOLINT
  except ImportError as e:
    print 'import gyp failed: %s' % e
    sys.exit(1)

  actual_path = os.path.abspath(gyp.__path__[0])
  if expected_path != actual_path:
    print 'Unexpected gyp module is loaded on this environment.'
    print '  expected: %s' % expected_path
    print '  actual  : %s' % actual_path
    sys.exit(1)


if __name__ == '__main__':
  main()
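# Illustrative note (not part of the original script; the paths below are
# hypothetical): on a mismatch the script reports both paths and exits
# non-zero, e.g.
#
#   Unexpected gyp module is loaded on this environment.
#     expected: /home/foobar/work/mozc/src/third_party/gyp/pylib/gyp
#     actual  : /usr/lib/python2.7/dist-packages/gyp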
kishikawakatsumi/Mozc-for-iOS
src/build_tools/ensure_gyp_module_path.py
Python
apache-2.0
2,712
#!/usr/bin/python ########## # # Shape2Pose scripts library: file management, job scheduling (if running parallel), scikit-learn interfaces # ########## import os, string, decimal, glob, re, subprocess, shlex, sys, datetime, time, getpass ###### Job Scheduling ###### maxJobs=500; usrName = getpass.getuser(); def ScheduleJob(cmd, jobName, scriptFile): jobID = []; if IsParallel(): cmdToSchedule = cmd; assert(not os.path.isfile(scriptFile)); # cannot schedule if file exists path = os.path.dirname(scriptFile); if not os.path.isdir(path): os.makedirs(path); f = open(scriptFile+".sh", 'w'); f.write(cmd); f.close(); os.system("chmod 777 "+scriptFile+".sh"); qsubParams = "-V "; qsubParams += "-l mem=16000mb,cput=24:00:00 "; # 12 hours qsubParams += "-N "+jobName; qsubParams += " -d "+os.getcwd()+" -o "+scriptFile+".log -e "+scriptFile+".err"; cmd = "qsub "+qsubParams+" "+scriptFile+".sh "; if int(NumMyJobs()) >= int(maxJobs): while (int(NumMyJobs()) >= .9 * int(maxJobs)): print("Too many jobs ("+str(NumMyJobs())+" / "+str(.9 * int(maxJobs))+")... waiting 5s"); time.sleep(5); time.sleep(.01); subPID = subprocess.Popen("qsub "+qsubParams+" "+scriptFile+".sh ", \ shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) idStr, errors = subPID.communicate(); if errors!="": print(errors); print("[WARNING] Error executing: "+scriptFile+".sh.\n Trying to re-run. NJobs="+str(NumMyJobs())); time.sleep(1); subPID = subprocess.Popen("qsub "+qsubParams+" "+scriptFile+".sh ", \ shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) idStr, errors = subPID.communicate(); if errors!="": time.sleep(1); subPID = subprocess.Popen("qsub "+qsubParams+" "+scriptFile+".sh ", \ shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) idStr, errors = subPID.communicate(); if errors!="": print(" [ERROR] Third run failed as well... 
quitting..."); assert(False); jobID =[idStr.split(".")[0] +" " +cmdToSchedule]; else: os.system(cmd); return jobID; def TmpFilesNames(prefix): return [prefix+".sh", prefix+".err", prefix+".log"]; def NumMyJobs(): subPID = subprocess.Popen("qstat | grep -c "+usrName, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) nJobs, errors = subPID.communicate(); if nJobs=='\n' or nJobs=='': return 0; else: return int(nJobs); def IsParallel(): subPID = subprocess.Popen("type qsub", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) qsubExists, errors = subPID.communicate(); if len(qsubExists)>=4 and qsubExists[:4]=="qsub": return True; # can qsub jobs else: return False; # cannot qsub jobs def WriteJobsFile(jobsIDs, filename): if filename != "none": f = open(filename, "w"); f.write("\n".join(jobsIDs)); f.close(); def ReadJobsFile(filename): if (IsParallel()): return open(filename, "r").read().strip().split("\n"); else: return []; def WaitForAllJobs(): if IsParallel(): while NumMyJobs()>0: print(" Waiting for jobs: "+str(NumMyJobs())) time.sleep(5) def WaitForJobsInFile(filename): if (IsParallel()): if (filename=="none"): WaitForAllJobs(); else: WaitForJobsInArray(ReadJobsFile(filename)); def WaitForJobsInArray(jobsIds): if IsParallel(): # list of jobs and their status jobID = []; jobCMD = []; jobStatus = []; jobInitRunTime = []; lastVisit = []; jobRestarts = []; # jobID map jMap = dict(); # initialize job data for job in jobsIds: jobSplitStr = job.split(" "); jobID.append(jobSplitStr[0]); jobCMD.append(" ".join(jobSplitStr[1:])); jobStatus.append("Q"); jobInitRunTime.append(-1); jMap[jobSplitStr[0]] = len(jobID)-1; lastVisit.append(0); jobRestarts.append(0); # wait for jobs to be done... counterSleeps = 0; sleepTime = 5; # in seconds visitCounter = 0; while len(jobID)>0: # jobs remain nRunning = 0; nTotal = 0; # check if idle jobs can be udpated # go over running jobs - set check id subPID = subprocess.Popen("qstat | grep "+usrName+" ", \ shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) jobList, errors = subPID.communicate(); jList = jobList.split("\n"); visitCounter = visitCounter + 1; for j in jList: # check which jobs remain if (j==""): continue; nTotal = nTotal + 1; jobDescrWithEmpty = j.strip().split(" "); jobDescr = []; for val in jobDescrWithEmpty: if val != "": jobDescr.append(val); jid = jobDescr[0].split(".")[0].strip(); jName = jobDescr[1].strip(); jTimeUse = jobDescr[3].strip(); jstat = jobDescr[4].strip(); if jid in jMap: lid = jMap[jid]; lastVisit[lid] = visitCounter; if jstat != "R" and jstat != "Q" and jstat != "E": print("[WARNING] Invalid jstat: "+jstat); if jstat != jobStatus[lid]: if (jobStatus[lid]+jstat)=="QR": jobStatus[lid] = jstat; jobInitRunTime[lid] = 0; elif (not jobStatus[lid]+jstat)=="RE": print("[WARNING] Unexpected state change: "+jobStatus[lid]+" -> "+jstat); if (jobStatus[lid]=="R"): nRunning = nRunning + 1; # if some job was never visited - done. 
erase toRemove = []; for j in range(0, len(lastVisit)): if (visitCounter!=lastVisit[j]): toRemove.append(j); # erase all jobs that were visited if (len(toRemove)>0): toRemove = sorted(toRemove, reverse=True); for j in toRemove: lastVisit.pop(j); jobID.pop(j); jobCMD.pop(j); jobStatus.pop(j); jobInitRunTime.pop(j); jobRestarts.pop(j); jMap = dict(); for j in range(0, len(jobID)): jMap[jobID[j]] = j; counterSleeps += sleepTime; if (len(jMap)>0): print(" Waiting for jobs: "+str(nRunning)+" / "+str(len(jMap))+" / "+str(nTotal)); time.sleep(sleepTime) ###### Reading / Writing lists ###### def NameOnly(filename): id = string.rfind(filename, "/") return filename[(id+1):] def ReadModelNames(filename): if os.path.isfile(filename): f = open(filename, "r"); mList = f.readlines(); mList = [NameOnly(s.strip()) for s in mList]; f.close(); elif os.path.isdir(filename): mListTemp = glob.glob(filename+"/*"); mListTemp = [NameOnly(s.strip()) for s in mListTemp]; mList = []; for mName in mListTemp: if mName[-4:]==".off": mName = mName[:-4]; elif mName[-3:]==".gt": mName = mName[:-3]; mList = mList + [mName]; else: print("[ERROR] Could not read filename: "+filename); mList = []; return mList; def WriteModelNames(mList, filename): with open(filename, "w") as f: f.write("\n".join(mList)) def ReadRegionNames(rFile): newRList = []; with open(rFile, "r") as f: rList = f.readlines(); for s in rList: sSplit = s.split(" "); newRList.append(sSplit[0].strip()); return newRList; ###### Weka classification (move to training?) ###### def WekaTrainClassifier(trainingData, trainedClassifier, statsFile): wekajar = "../../external/weka-3-7-9/weka.jar"; classifier = "weka.classifiers.trees.M5P"; cmd = "java -Djava.awt.headless=true -Xmx2000m -cp "+wekajar+" "+classifier+" -t "+trainingData+" "; cmd += "-d "+trainedClassifier+" -M 10 "; #-M 15 -- 4 still seems to be the best :( return cmd+" > "+statsFile+" "; def WekaClassify(trainedClassifier, unclassifiedPoints, classifiedPoints): wekajar = "../../external/weka-3-7-9/weka.jar"; classifier = "weka.classifiers.trees.M5P"; cmd = "java -Djava.awt.headless=true -Xmx2000m -cp "+wekajar+" "+classifier+" -l "+trainedClassifier+" "; cmd += "-T "+unclassifiedPoints+" -p 0 > "+classifiedPoints+" "; return cmd; pythonname = "python2.7 "; def SciKitTrainClassifier(trainingData, trainedClassifier, statsFile): cmd = pythonname+"./scriptlibs/randomforest.py -train "+trainingData+" "+trainedClassifier+" "+statsFile+" "; return cmd; def SciKitClassify(trainedClassifier, unclassifiedPoints, classifiedPoints): cmd = pythonname+"./scriptlibs/randomforest.py -test "+trainedClassifier+" "+unclassifiedPoints+" "+classifiedPoints+" "; return cmd;
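# Illustrative sketch (assumption, not part of the original library): a
# typical caller pairs ScheduleJob with the jobs-file helpers, e.g.
#
#   jobs = []
#   jobs += ScheduleJob("echo hello", "demo_job", "tmp/demo_job")
#   WriteJobsFile(jobs, "tmp/jobs.txt")
#   WaitForJobsInFile("tmp/jobs.txt")
#
# When qsub is unavailable, ScheduleJob runs the command inline and returns
# an empty list, so the same driver code works both serially and on the
# cluster.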
mhsung/structure-completion
python/fas.py
Python
mit
8,178
import os import requests import seaborn as sns import shelve from operator import itemgetter import matplotlib.image as mpimg import praw from praw.helpers import submissions_between user = os.environ['REDDIT_USERNAME'] user_agent = 'Calculating % of downvoted submissions 0.1 by /u/{}' r = praw.Reddit(user_agent) subreddit_stats = shelve.open("subreddit_stats.shelve", writeback=True) highest_timestamp = 1449446399 # 06.12.2015 lowest_timestamp = highest_timestamp - 7*60*60*24 + 1 def get_default_subreddits_names(): r = requests.get('https://www.reddit.com/subreddits/default.json?limit=100', headers={'User-Agent': user_agent}) return [sub_info['data']['display_name'] for sub_info in r.json()['data']['children']] def get_sub_names(sub_list): return [s.display_name for s in sub_list] def get_top_subreddits(): if '_top_defaults' in subreddit_stats and '_top_non_defaults' in subreddit_stats: return default_subreddits_names = get_default_subreddits_names() popular_subreddits = list(r.get_popular_subreddits(limit=1000)) top_defaults = [s for s in popular_subreddits if s.display_name in default_subreddits_names] if len(top_defaults) < len(default_subreddits_names): for sub in set(default_subreddits_names) - set(get_sub_names(top_defaults)): # the top will probably be slightly incorrect # The 3 missing subs are philosophy, announcements and blog top_defaults.append(r.get_subreddit(sub)) top_non_defaults = [s for s in popular_subreddits if s.display_name not in default_subreddits_names] assert len(top_defaults) == len(default_subreddits_names) assert len(top_non_defaults) >= 50 subreddit_stats['_top_defaults'] = get_sub_names(top_defaults) subreddit_stats['_top_non_defaults'] = get_sub_names(top_non_defaults) subreddit_stats.sync() def get_subreddit_stats(sublist, subreddit_stats): for sub in sublist: if str(sub) in subreddit_stats: continue downvoted_submissions = 0 total_submissions = 0 for submission in submissions_between(r, sub, lowest_timestamp=lowest_timestamp, highest_timestamp=highest_timestamp, verbosity=0): assert submission.created_utc <= highest_timestamp and submission.created_utc >= lowest_timestamp if submission.score <= 0: downvoted_submissions += 1 total_submissions += 1 subreddit_stats[sub] = {"downvoted_submissions": downvoted_submissions, "total_submissions": total_submissions} subreddit_stats.sync() print sub, total_submissions, downvoted_submissions, downvoted_submissions / float(total_submissions) if total_submissions else 0 def sort_and_process_data(subreddit_names, subreddit_stats): data = [] for sub, stats in subreddit_stats.iteritems(): if sub not in subreddit_names: continue ds = stats['downvoted_submissions'] ts = stats['total_submissions'] data.append((sub, ds, ts, float(ds) / ts if ts != 0 else 0)) return sorted(data, key=itemgetter(3, 2)) # From: http://stackoverflow.com/questions/29702424/how-to-get-matplotlib-figure-size def get_figure_size(fig): bbox = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) return bbox.width * fig.dpi, bbox.height * fig.dpi def do_plot(subreddit_names, subreddit_stats, out_file, title, license_icon_filename): licence_icon = mpimg.imread(license_icon_filename) data = sort_and_process_data(subreddit_names, subreddit_stats) y, x = zip(*[(d[0], d[3]) for d in data]) x = map(lambda x: x * 100, x) sns.set_style('whitegrid') sns.set(font="Bitstream Vera Sans", style='whitegrid') sns.set_context(rc={"figure.figsize": (16, 11), "font_scale": 1.0}) fig = sns.plt.figure() ax = sns.barplot(x=x, y=y, palette="Blues") 
    ax.set(xlim=(0, max(x) * 1.1))
    ax.set(xlabel="% Downvoted Submissions\n (% submissions having score <= 0)", ylabel="Subreddit name")
    ax.tick_params(labelright=True)

    rects = ax.patches
    labels = ["{} / {}".format(ds, ts) for (sub, ds, ts, dp) in data]
    for rect, label in zip(rects, labels):
        ax.text(rect.get_x() + rect.get_width() + max(x) * 0.01, rect.get_y() + rect.get_height() / 2, label, ha='left', va='center', fontsize="smaller")

    sns.plt.title(title)

    # matplotlib api is weird. I have no idea what I'm doing, but this seems to work
    width, height = get_figure_size(fig)

    # I have no idea why I have to provide coordinates in different systems
    # But this set of params looks nice when I'm saving the pics
    # So fuck it, I am going to leave it like this
    # (I hope someone with good knowledge of matplotlib is willing to teach me all this stuff)
    sns.plt.figimage(licence_icon, width + 50, 10)
    sns.plt.figtext(0.99, 0.05, 'Made by\n /u/godlikesme', horizontalalignment='right')

    sns.plt.savefig(out_file, dpi=100, bbox_inches='tight')
    sns.plt.clf()


get_top_subreddits()
top_defaults = subreddit_stats['_top_defaults'][:50]
top_non_defaults = subreddit_stats['_top_non_defaults'][:50]
get_subreddit_stats(top_defaults, subreddit_stats)
get_subreddit_stats(top_non_defaults, subreddit_stats)

title_templ = "Percentage of downvoted submissions in {} (30.11.2015 - 06.12.2015)"
do_plot(top_defaults, subreddit_stats, "out_top50_defaults.png", title_templ.format("all default subreddits"), "cc-by.png")
do_plot(top_defaults[:30], subreddit_stats, "out_top30_defaults.png", title_templ.format("top30 default subreddits"), "cc-by.png")
do_plot(top_non_defaults[:50], subreddit_stats, "out_top50_non_defaults.png", title_templ.format("top50 non-default subreddits"), "cc-by.png")
eleweek/dataisbeautiful
downvoted_submissions.py
Python
mit
5,968
# # Copyright 2016 Dohop hf. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Setup script for building supervisor-logstash-notifier """ from setuptools import setup, find_packages # 2 step 'with open' to be python2.6 compatible with open('requirements.txt') as requirements: with open('test_requirements.txt') as test_requirements: setup( name='supervisor-logstash-notifier', version='0.2.5', packages=find_packages(exclude=['tests']), url='https://github.com/dohop/supervisor-logstash-notifier', license='Apache 2.0', author='aodj', author_email='alexander@dohop.com', description='Stream supervisor events to a logstash instance', long_description=open('README.rst').read(), entry_points={ 'console_scripts': [ 'logstash_notifier = logstash_notifier:main' ] }, install_requires=requirements.read().splitlines(), test_suite='tests', tests_require=test_requirements.read().splitlines(), )
dohop/supervisor-logstash-notifier
setup.py
Python
apache-2.0
1,635
#!/usr/bin/env python # coding=utf-8 from hooky import List class MyList(List): def _before_add(self, key, item): print('before add, key: {}, item: {}'.format(key, repr(item))) def _after_add(self, key, item): print(' after add, key: {}, item: {}'.format(key, repr(item))) def _before_del(self, key, item): print('before_del, key: {}, item: {}'.format(key, repr(self[key]))) def _after_del(self, key, item): print(' after_del, key: ', key) l = MyList(['a', 'b']) l.append(1) l.extend(['f', 'g', 2]) l.pop() l[2:3] = ['c', 'd', 'e'] print(l) l.clear() print(l)
meng89/hooky
docs/demo2_list.py
Python
mit
623
# encoding: utf-8 import re import os import logging import json from django.conf import settings from django import forms from django.template import Context from django.forms.widgets import FILE_INPUT_CONTRADICTION, CheckboxInput, FileInput from django.utils.encoding import force_unicode from django.template.loader import render_to_string from django.forms.utils import flatatt from django.utils.safestring import mark_safe from django.core.files.uploadedfile import InMemoryUploadedFile try: from sorl.thumbnail.shortcuts import get_thumbnail except: def get_thumbnail(image_url, *args, **kwargs): return image_url logger = logging.getLogger(__name__) EMBED_TYPES = { 'youtube': [ ( r'(https?://)?(www\.)?' '(youtube|youtu|youtube-nocookie)\.(com|be)/' '(watch\?v=|embed/|v/|.+\?v=)?([^&=%\?\s"]{11})', '<iframe class="django_extended-media" src="' 'https://www.youtube.com/embed/\\6?controls=0&amp;showinfo=0"' 'scrolling="no" frameborder="no" allowfullscreen></iframe>' ) ], 'soundcloud': [ ( r'(http[s]?\:\/\/w\.soundcloud\.com\/player\/\?url=([^"]+))', '<iframe class="django_extended-media" src="https://w.soundcloud.com/player/?url=\\2" scrolling="no" frameborder="no" allowfullscreen></iframe>' ), ( r'(http[s]?\:\/\/soundcloud\.com\/[\d\w\-_]+/[\d\w\-_]+)', '<iframe class="django_extended-media" src="https://w.soundcloud.com/player/?url=\\1" scrolling="no" frameborder="no" allowfullscreen></iframe>' ) ] } class MediaInput(forms.widgets.ClearableFileInput): template_name = 'django_extended/fields/_media_input.html' change_message = "Changer" empty_message = "Changer" class Media: css = { 'all': ( 'django_extended/forms/media.css', ) } js = ( 'django_extended/vendors/media-dropzone/jquery.media-dropzone.js', 'django_extended/forms/media.js', ) def __init__(self, *args, **kwargs): self.authorized_types = [] if kwargs.get('authorized_types'): self.authorized_types = kwargs.pop('authorized_types') super(MediaInput, self).__init__(*args, **kwargs) def render(self, name, value, attrs=None): # print "MediaInput.render : ", name, type(value), value, self.is_initial(value) if value is None: value = '' field = super(MediaInput, self).render(name, value, attrs=attrs) in_memory = False if isinstance(value, InMemoryUploadedFile): in_memory = True final_attrs = self.build_attrs(attrs, type=self.input_type, name=name) checkbox_name = self.clear_checkbox_name(name) checkbox_id = self.clear_checkbox_id(checkbox_name) context = { 'only_file': len(self.authorized_types) == 1 and 'image' in self.authorized_types, 'required': self.is_required, 'field': field, 'name': name, 'value': value, 'id': final_attrs['id'], 'final_attrs': flatatt(final_attrs), 'change_message': self.change_message, 'empty_message': self.empty_message, 'embed_types': json.dumps(EMBED_TYPES) } if not self.is_required: context['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id}) return render_to_string(self.template_name, Context(context)) # def decompress(self, value): # if value: # return value.split(' ') # return [None, None] def value_from_datadict(self, data, files, name): # print 'MediaInput.value_from_datadict : ', name, (files.get(name, None), data.get(name, None)) if not self.is_required and CheckboxInput().value_from_datadict( data, files, self.clear_checkbox_name(name)): return False if files: return files.get(name, None) else: return data.get(name, None) # return files.get(name, None), data.get(name, None) class MediaField(forms.FileField): # widget = MediaInput default_error_messages = { 'invalid_image': 
"Upload a valid image. The file you uploaded was either not an image or a corrupted image.", } def __init__(self, *args, **kwargs): # required = kwargs.get('required', False) #widget = kwargs.pop('widget', None) # widget = MediaInput( attrs={ # 'class': 'django_extended-media_field', # # 'data-django_extended-media_field': self.image_field, # # 'style': 'display:none;', # }) # kwargs['widget'] = widget # kwargs['label'] = "" super(MediaField, self).__init__(*args, **kwargs) def to_python(self, data): # print 'MediaFormField.to_python : ', data, type(data) return data def has_changed(self, initial, data): # print 'MediaFormField.has_changed:', data, type(data) if data is None: return super(MediaField, self).has_changed(initial, data) return True def bound_data(self, data, initial): # print 'MediaFormField.bound_data:', data, type(data), initial, type(initial) if data in [False, None] and initial: return initial return data def clean(self, data, initial=None): if data is False: if not self.required: return False data = None if not data and initial: return initial return super(MediaField, self).clean(data)
dalou/django-extended
django_extended/forms/media.py
Python
bsd-3-clause
5,738
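A minimal usage sketch for the field/widget pair above, assuming django_extended is installed with its static assets collected; the form and field names are illustrative, not part of the package.

from django import forms

from django_extended.forms.media import MediaField, MediaInput


class ArticleForm(forms.Form):
    # authorized_types is the kwarg MediaInput pops in __init__; listing only
    # 'image' makes the widget render its image-only dropzone variant.
    media = MediaField(
        required=False,
        widget=MediaInput(authorized_types=['image']),
    )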
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals

import arrow
from marshmallow_jsonapi import fields

from woodbox.jsonapi_schema import JSONAPISchema


class DocumentSchema(JSONAPISchema):
    document_type = fields.String()
    title = fields.String()
    body = fields.String()
    # Pass the callable itself: evaluating arrow.utcnow() here would freeze
    # the default timestamp at import time instead of at load time.
    date_created = fields.DateTime(allow_none=True, missing=arrow.utcnow)
    date_modified = fields.DateTime(allow_none=True, missing=arrow.utcnow)
patrickfournier/woodbox_example
app/api_v1/document.py
Python
apache-2.0
483
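A hedged round-trip sketch for the schema above. The envelope is marshmallow-jsonapi's standard load format; the 'documents' type string and the exact result shape (marshmallow 2.x returns a (data, errors) pair) are assumptions about the versions this project pins.

from app.api_v1.document import DocumentSchema

schema = DocumentSchema()
result = schema.load({
    'data': {
        'type': 'documents',  # hypothetical JSON API resource type
        'attributes': {
            'title': 'Hello',
            'body': 'First note.',
        },
    },
})
# date_created / date_modified fall back to the utcnow default here, because
# `missing=` supplies the deserialization value for absent fields.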
#!/usr/bin/env python # ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013-15, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import numpy from PyRegion import PyRegion from nupic.data.fieldmeta import FieldMetaType class RecordSensor(PyRegion): """ A Record Sensor (RS) retrieves an information "record" and encodes it to be suitable as input to an HTM. An information record is analogous database record -- it is just a collection of typed values: date, amount, category, location, etc. The RS may obtain information from one of three sources: . a file (e.g. csv or tsv) . a sql database (not yet implemented) . a data generator (for artificial data) The RS encodes a record using an encoding scheme that can be specified programmatically. An RS is essentially a shell containing two objects: 1. A DataSource object gets one record at a time. This record is returned either as a dictionary or a user-defined object. The fields within a record correspond to entries in the dictionary or attributes of the object. For example, a DataSource might return: dict(date="02-01-2010 23:12:23", amount=4.95, country="US", _reset=0, _sequenceId=0) or an object with attributes "date", "amount" and "country". The _reset and _sequenceId attributes must always exist, and are provided by the DataSource if not directly present in the data. DataSource methods are: -- getNext() -- return the next record, which is a dict -- TBD: something like getIterationCount()? 2. A MultiEncoder object encodes one record into a fixed-sparsity distributed representation. MultiEncoder is defined in nupic.encoders The DataSource and MultiEncoder are supplied after the node is created, not in the node itself. Example usage in NuPIC: from nupic.net import Network from nupic.encoders import MultiEncoder from nupic.data.file.file_record_stream import FileRecordStream n = Network() s = n.addRegion("sensor", "py.RecordSensor", "") mysource = FileRecordStream("mydata.txt") myencoder = MultiEncoder() ... set up myencoder ... s.getSelf().dataSource = mysource s.getSelf().encoder = myencoder l1 = n.addRegion("l1", "py.FDRCNode", "[create params]") n.initialize() n.run(100) TBD: the data source could also include the type of data, and we could more closely tie the DataSource output to the encoder input, ensuring that data types match and that allfields the encoder expects to see are in fact present. 
""" @classmethod def getSpec(cls): ns = dict( singleNodeOnly=True, description="Sensor that reads data records and encodes them for an HTM", outputs=dict( dataOut=dict( description="Encoded data", dataType="Real32", # very inefficient for bits, but that is what we use now count=0, regionLevel=True, isDefaultOutput=True), resetOut=dict( description="Reset signal", dataType="Real32", count=1, regionLevel=True, isDefaultOutput=False), sequenceIdOut=dict( description="Sequence ID", dataType='UInt64', count=1, regionLevel=True, isDefaultOutput=False), categoryOut=dict( description="Category", dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), sourceOut=dict( description="Unencoded data from the source, input to the encoder", dataType="Real32", count=0, regionLevel=True, isDefaultOutput=False), spatialTopDownOut=dict( description="""The top-down output signal, generated from feedback from SP""", dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), temporalTopDownOut=dict( description="""The top-down output signal, generated from feedback from TP through SP""", dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), ), inputs=dict( spatialTopDownIn=dict( description="""The top-down input signal, generated from feedback from SP""", dataType='Real32', count=0, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), temporalTopDownIn=dict( description="""The top-down input signal, generated from feedback from TP through SP""", dataType='Real32', count=0, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), ), parameters=dict( verbosity=dict( description="Verbosity level", dataType="UInt32", accessMode="ReadWrite", count=1, constraints=""), numCategories=dict( description=("Total number of categories to expect from the " "FileRecordStream"), dataType="UInt32", accessMode="ReadWrite", count=1, constraints=""), topDownMode=dict( description='1 if the node should do top down compute on the next call ' 'to compute into topDownOut (default 0).', accessMode='ReadWrite', dataType='UInt32', count=1, constraints='bool'), ), commands=dict()) return ns def __init__(self, verbosity=0, numCategories=1): """ Create a node without an encoder or datasource """ self.encoder = None self.disabledEncoder = None self.dataSource = None self._outputValues = {} self.preEncodingFilters = [] self.postEncodingFilters = [] self.topDownMode = False self.verbosity = verbosity self.numCategories = numCategories self._iterNum = 0 # lastRecord is the last record returned. Used for debugging only self.lastRecord = None def __setstate__(self, state): # Default value for older versions being deserialized. self.disabledEncoder = None self.__dict__.update(state) if not hasattr(self, "numCategories"): self.numCategories = 1 def initialize(self, dims, splitterMaps): if self.encoder is None: raise Exception("Unable to initialize RecordSensor -- encoder has not been set") if self.dataSource is None: raise Exception("Unable to initialize RecordSensor -- dataSource has not been set") def rewind(self): """ Reset the sensor to beginning of data. """ self._iterNum = 0 if self.dataSource is not None: self.dataSource.rewind() def getNextRecord(self): """Get the next record to encode. Includes getting a record from the datasource and applying filters. If the filters request more data from the datasource continue to get data from the datasource until all filters are satisfied. 
This method is separate from compute() so that we can use a standalone RecordSensor to get filtered data""" foundData = False while not foundData: # Get the data from the dataSource data = self.dataSource.getNextRecordDict() if not data: raise StopIteration("Datasource has no more data") # temporary check if "_reset" not in data: data["_reset"] = 0 if "_sequenceId" not in data: data["_sequenceId"] = 0 if "_category" not in data: data["_category"] = [None] if self.verbosity > 0: print "RecordSensor got data: %s" % data # Apply pre-encoding filters. # These filters may modify or add data # If a filter needs another record (e.g. a delta filter) # it will request another record by returning False and the current record # will be skipped (but will still be given to all filters) # # We have to be very careful about resets. A filter may add a reset, # but other filters should not see the added reset, each filter sees # the original reset value, and we keep track of whether any filter # adds a reset. foundData = True if len(self.preEncodingFilters) > 0: originalReset = data['_reset'] actualReset = originalReset for f in self.preEncodingFilters: # if filter needs more data, it returns False result = f.process(data) foundData = foundData and result actualReset = actualReset or data['_reset'] data['_reset'] = originalReset data['_reset'] = actualReset self.lastRecord = data return data def populateCategoriesOut(self, categories, output): """ Populate the output array with the category indices. Note: non-categories are represented with -1. """ if categories[0] is None: # The record has no entry in category field. output[:] = -1 else: # Populate category output array by looping over the smaller of the # output array (size specified by numCategories) and the record's number # of categories. for i, cat in enumerate(categories[:len(output)]): output[i] = cat output[len(categories):] = -1 def compute(self, inputs, outputs): """Get a record from the dataSource and encode it.""" if not self.topDownMode: data = self.getNextRecord() # The private keys in data are standard of RecordStreamIface objects. Any # add'l keys are column headers from the data source. reset = data["_reset"] sequenceId = data["_sequenceId"] categories = data["_category"] # Encode the processed records; populate outputs["dataOut"] in place self.encoder.encodeIntoArray(data, outputs["dataOut"]) # Write out the scalar values obtained from they data source. outputs["sourceOut"][:] = self.encoder.getScalars(data) self._outputValues["sourceOut"] = self.encoder.getEncodedValues(data) # ----------------------------------------------------------------------- # Get the encoded bit arrays for each field encoders = self.encoder.getEncoderList() prevOffset = 0 sourceEncodings = [] bitData = outputs["dataOut"] for encoder in encoders: nextOffset = prevOffset + encoder.getWidth() sourceEncodings.append(bitData[prevOffset:nextOffset]) prevOffset = nextOffset self._outputValues['sourceEncodings'] = sourceEncodings # Execute post-encoding filters, if any for filter in self.postEncodingFilters: filter.process(encoder=self.encoder, data=outputs['dataOut']) # Populate the output numpy arrays; must assign by index. outputs['resetOut'][0] = reset outputs['sequenceIdOut'][0] = sequenceId self.populateCategoriesOut(categories, outputs['categoryOut']) # ------------------------------------------------------------------------ # Verbose print? 
      if self.verbosity >= 1:
        if self._iterNum == 0:
          self.encoder.pprintHeader(prefix="sensor:")
        if reset:
          print "RESET - sequenceID:%d" % sequenceId

      if self.verbosity >= 2:
        print

      # If verbosity >=2, print the record fields
      if self.verbosity >= 1:
        self.encoder.pprint(outputs["dataOut"], prefix="%7d:" % (self._iterNum))
        scalarValues = self.encoder.getScalars(data)
        nz = outputs["dataOut"].nonzero()[0]
        print "   nz: (%d)" % (len(nz)), nz
        print "  encIn:", self.encoder.scalarsToStr(scalarValues)
      if self.verbosity >= 2:
        #if hasattr(data, 'header'):
        #  header = data.header()
        #else:
        #  header = ' '.join(self.dataSource.names)
        #print "  ", header
        print "  data:", str(data)
      if self.verbosity >= 3:
        decoded = self.encoder.decode(outputs["dataOut"])
        print "decoded:", self.encoder.decodedToStr(decoded)

      self._iterNum += 1

    else:
      # =========================================================================
      # Spatial
      # =========================================================================
      # This is the top down compute in sensor
      # We get the spatial pooler's topDownOut as spatialTopDownIn
      spatialTopDownIn = inputs['spatialTopDownIn']
      spatialTopDownOut = self.encoder.topDownCompute(spatialTopDownIn)

      # -----------------------------------------------------------------------
      # Split topDownOutput into separate outputs
      values = [elem.value for elem in spatialTopDownOut]
      scalars = [elem.scalar for elem in spatialTopDownOut]
      encodings = [elem.encoding for elem in spatialTopDownOut]

      self._outputValues['spatialTopDownOut'] = values
      outputs['spatialTopDownOut'][:] = numpy.array(scalars)
      self._outputValues['spatialTopDownEncodings'] = encodings

      # =========================================================================
      # Temporal
      # =========================================================================
      ## TODO: Add temporal top-down loop
      # We get the temporal pooler's topDownOut passed through the spatial pooler
      # as temporalTopDownIn
      temporalTopDownIn = inputs['temporalTopDownIn']
      temporalTopDownOut = self.encoder.topDownCompute(temporalTopDownIn)

      # -----------------------------------------------------------------------
      # Split topDownOutput into separate outputs
      values = [elem.value for elem in temporalTopDownOut]
      scalars = [elem.scalar for elem in temporalTopDownOut]
      encodings = [elem.encoding for elem in temporalTopDownOut]

      self._outputValues['temporalTopDownOut'] = values
      outputs['temporalTopDownOut'][:] = numpy.array(scalars)
      self._outputValues['temporalTopDownEncodings'] = encodings

      assert len(spatialTopDownOut) == len(temporalTopDownOut), (
          "Error: spatialTopDownOut and temporalTopDownOut should be the "
          "same size")

  def _convertNonNumericData(self, spatialOutput, temporalOutput, output):
    """
    Converts all of the non-numeric fields from spatialOutput and temporalOutput
    into their scalar equivalents and records them in the output dictionary.
    Parameters:
    -----------------------------------------------------------------------
    spatialOutput:    The results of topDownCompute() for the spatial input
    temporalOutput:   The results of topDownCompute() for the temporal input
    output:           The main dictionary of outputs passed to compute(). It
                      is expected to have keys 'spatialTopDownOut' and
                      'temporalTopDownOut' that are mapped to numpy arrays
    """
    encoders = self.encoder.getEncoderList()
    types = self.encoder.getDecoderOutputFieldTypes()
    for i, (encoder, type) in enumerate(zip(encoders, types)):
      spatialData = spatialOutput[i]
      temporalData = temporalOutput[i]

      if type != FieldMetaType.integer and type != FieldMetaType.float:
        # TODO: Make sure that this doesn't modify any state
        spatialData = encoder.getScalars(spatialData)[0]
        temporalData = encoder.getScalars(temporalData)[0]

      assert isinstance(spatialData, (float, int))
      assert isinstance(temporalData, (float, int))
      output['spatialTopDownOut'][i] = spatialData
      output['temporalTopDownOut'][i] = temporalData

  def getOutputValues(self, outputName):
    """Return the dictionary of output values.

    Note that these are normal Python lists, rather than numpy arrays. This is
    to support lists with mixed scalars and strings, as in the case of records
    with categorical variables.
    """
    return self._outputValues[outputName]

  def getOutputElementCount(self, name):
    """ Computes the width of dataOut """
    if name == "resetOut":
      print "WARNING: getOutputElementCount should not have been called with " \
            "resetOut"
      return 1

    elif name == "sequenceIdOut":
      print "WARNING: getOutputElementCount should not have been called with " \
            "sequenceIdOut"
      return 1

    elif name == "dataOut":
      if self.encoder is None:
        raise Exception("NuPIC requested output element count for 'dataOut' on a "
                        "RecordSensor node, but the encoder has not been set")
      return self.encoder.getWidth()

    elif name == "sourceOut":
      if self.encoder is None:
        raise Exception("NuPIC requested output element count for 'sourceOut' "
                        "on a RecordSensor node, but the encoder has not been set")
      return len(self.encoder.getDescription())

    elif name == "categoryOut":
      return self.numCategories

    elif name == 'spatialTopDownOut' or name == 'temporalTopDownOut':
      if self.encoder is None:
        raise Exception("NuPIC requested output element count for '%s' "
                        "on a RecordSensor node, but the encoder has not "
                        "been set" % name)
      return len(self.encoder.getDescription())

    else:
      raise Exception("Unknown output %s" % name)

  def setParameter(self, parameterName, index, parameterValue):
    """
    Set the value of a Spec parameter. Most parameters are handled
    automatically by PyRegion's parameter set mechanism. The ones that need
    special treatment are explicitly handled here.
    """
    if parameterName == 'topDownMode':
      self.topDownMode = parameterValue
    else:
      raise Exception('Unknown parameter: ' + parameterName)
jaredweiss/nupic
nupic/regions/RecordSensor.py
Python
gpl-3.0
18,730
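Expanded from the usage example embedded in the RecordSensor docstring above (Python 2 era, matching the file); the encoder configuration is elided there and stays elided here.

from nupic.net import Network
from nupic.encoders import MultiEncoder
from nupic.data.file.file_record_stream import FileRecordStream

n = Network()
s = n.addRegion("sensor", "py.RecordSensor", "")
s.getSelf().dataSource = FileRecordStream("mydata.txt")

myencoder = MultiEncoder()
# ... set up myencoder's fields here ...
s.getSelf().encoder = myencoder

n.initialize()
n.run(100)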
import torch import torch.nn as nn import os import numpy as np import torchvision from torch.utils.data import DataLoader import torchvision.transforms as transforms import ray from ray import tune from ray.tune.schedulers import create_scheduler from ray.tune.integration.horovod import (DistributedTrainableCreator, distributed_checkpoint_dir) from ray.util.sgd.torch.resnet import ResNet18 from ray.tune.utils.release_test_util import ProgressCallback CIFAR10_STATS = { "mean": (0.4914, 0.4822, 0.4465), "std": (0.2023, 0.1994, 0.2010), } def train(config, checkpoint_dir=None): import horovod.torch as hvd hvd.init() device = torch.device("cuda" if torch.cuda.is_available() else "cpu") net = ResNet18(None).to(device) optimizer = torch.optim.SGD( net.parameters(), lr=config["lr"], ) epoch = 0 if checkpoint_dir: with open(os.path.join(checkpoint_dir, "checkpoint")) as f: model_state, optimizer_state, epoch = torch.load(f) net.load_state_dict(model_state) optimizer.load_state_dict(optimizer_state) criterion = nn.CrossEntropyLoss() optimizer = hvd.DistributedOptimizer(optimizer) np.random.seed(1 + hvd.rank()) torch.manual_seed(1234) # To ensure consistent initialization across slots, hvd.broadcast_parameters(net.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) trainset = ray.get(config["data"]) trainloader = DataLoader( trainset, batch_size=int(config["batch_size"]), shuffle=True, num_workers=4) for epoch in range(epoch, 40): # loop over the dataset multiple times running_loss = 0.0 epoch_steps = 0 for i, data in enumerate(trainloader): # get the inputs; data is a list of [inputs, labels] inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() epoch_steps += 1 tune.report(loss=running_loss / epoch_steps) if i % 2000 == 1999: # print every 2000 mini-batches print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / epoch_steps)) with distributed_checkpoint_dir(step=epoch) as checkpoint_dir: print("this checkpoint dir: ", checkpoint_dir) path = os.path.join(checkpoint_dir, "checkpoint") torch.save((net.state_dict(), optimizer.state_dict(), epoch), path) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument( "--smoke-test", action="store_true", help=("Finish quickly for testing.")) args = parser.parse_args() if args.smoke_test: ray.init() else: ray.init(address="auto") # assumes ray is started with ray up horovod_trainable = DistributedTrainableCreator( train, use_gpu=False if args.smoke_test else True, num_hosts=1 if args.smoke_test else 2, num_slots=2 if args.smoke_test else 2, replicate_pem=False) transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(CIFAR10_STATS["mean"], CIFAR10_STATS["std"]), ]) # meanstd transformation dataset = torchvision.datasets.CIFAR10( root="/tmp/data_cifar", train=True, download=True, transform=transform_train) # ensure that checkpointing works. 
pbt = create_scheduler( "pbt", perturbation_interval=2, hyperparam_mutations={ "lr": tune.uniform(0.001, 0.1), }) analysis = tune.run( horovod_trainable, metric="loss", mode="min", keep_checkpoints_num=1, scheduler=pbt, config={ "lr": 0.1 if args.smoke_test else tune.grid_search([0.1 * i for i in range(1, 10)]), "batch_size": 64, "data": ray.put(dataset) }, num_samples=1, stop={"training_iteration": 1} if args.smoke_test else None, callbacks=[ProgressCallback()], # FailureInjectorCallback() fail_fast=True, ) print("Best hyperparameters found were: ", analysis.best_config)
pcmoritz/ray-1
release/horovod_tests/workloads/horovod_test.py
Python
apache-2.0
4,752
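For reference, the create_scheduler("pbt", ...) call in the script resolves to Ray Tune's PopulationBasedTraining; constructing it directly is equivalent (a sketch, with argument names per the Tune versions this script targets).

from ray import tune
from ray.tune.schedulers import PopulationBasedTraining

pbt = PopulationBasedTraining(
    perturbation_interval=2,
    hyperparam_mutations={"lr": tune.uniform(0.001, 0.1)},
)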
# Copyright 2016 Stanislav Krotov <https://it-projects.info/team/ufaks>
# Copyright 2016 manawi <https://github.com/manawi>
# Copyright 2019 Kolushov Alexandr <https://it-projects.info/team/KolushovAlexandr>
# License MIT (https://opensource.org/licenses/MIT).

from odoo import api, fields, models


class PosConfig(models.Model):
    _inherit = "pos.config"

    @api.model
    def _default_negative_stock_user(self):
        return self.env.ref("point_of_sale.group_pos_manager")

    negative_order_group_id = fields.Many2one(
        "res.groups",
        string="Negative Order Group",
        default=_default_negative_stock_user,
        help="Group whose members are allowed to sell products that are "
             "out of stock.",
    )
    negative_order_manager_permission = fields.Boolean(
        "Managers Permission",
        help="Ask a manager's permission to proceed with an order containing "
             "negative-stock products",
        default=True,
    )
    negative_order_warning = fields.Boolean(
        "Show Warning",
        help="Show a warning when adding out-of-stock products",
        default=False,
    )


class PosOrder(models.Model):
    _inherit = "pos.order"

    negative_stock_user_id = fields.Many2one(
        "res.users",
        string="Negative stock approval",
        help="Person who authorized a sale of a product that is out of stock",
    )

    @api.model
    def _order_fields(self, ui_order):
        res = super(PosOrder, self)._order_fields(ui_order)
        res["negative_stock_user_id"] = ui_order.get("negative_stock_user_id", False)
        return res
it-projects-llc/pos-addons
pos_product_available_negative/models.py
Python
mit
1,555
# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import errno import functools import logging import os import shutil import subprocess import sys import tempfile import threading import time import weakref from oslo.config import cfg from neutron_fwaas.openstack.common import fileutils from neutron_fwaas.openstack.common._i18n import _, _LE, _LI LOG = logging.getLogger(__name__) util_opts = [ cfg.BoolOpt('disable_process_locking', default=False, help='Enables or disables inter-process locks.'), cfg.StrOpt('lock_path', default=os.environ.get("NEUTRON_FWAAS_LOCK_PATH"), help='Directory to use for lock files.') ] CONF = cfg.CONF CONF.register_opts(util_opts) def set_defaults(lock_path): cfg.set_defaults(util_opts, lock_path=lock_path) class _FileLock(object): """Lock implementation which allows multiple locks, working around issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does not require any cleanup. Since the lock is always held on a file descriptor rather than outside of the process, the lock gets dropped automatically if the process crashes, even if __exit__ is not executed. There are no guarantees regarding usage by multiple green threads in a single process here. This lock works only between processes. Exclusive access between local threads should be achieved using the semaphores in the @synchronized decorator. Note these locks are released when the descriptor is closed, so it's not safe to close the file descriptor while another green thread holds the lock. Just opening and closing the lock file can break synchronisation, so lock files must be accessed only using this abstraction. """ def __init__(self, name): self.lockfile = None self.fname = name def acquire(self): basedir = os.path.dirname(self.fname) if not os.path.exists(basedir): fileutils.ensure_tree(basedir) LOG.info(_LI('Created lock path: %s'), basedir) self.lockfile = open(self.fname, 'w') while True: try: # Using non-blocking locks since green threads are not # patched to deal with blocking locking calls. # Also upon reading the MSDN docs for locking(), it seems # to have a laughable 10 attempts "blocking" mechanism. 
self.trylock() LOG.debug('Got file lock "%s"', self.fname) return True except IOError as e: if e.errno in (errno.EACCES, errno.EAGAIN): # external locks synchronise things like iptables # updates - give it some time to prevent busy spinning time.sleep(0.01) else: raise threading.ThreadError(_("Unable to acquire lock on" " `%(filename)s` due to" " %(exception)s") % {'filename': self.fname, 'exception': e}) def __enter__(self): self.acquire() return self def release(self): try: self.unlock() self.lockfile.close() LOG.debug('Released file lock "%s"', self.fname) except IOError: LOG.exception(_LE("Could not release the acquired lock `%s`"), self.fname) def __exit__(self, exc_type, exc_val, exc_tb): self.release() def exists(self): return os.path.exists(self.fname) def trylock(self): raise NotImplementedError() def unlock(self): raise NotImplementedError() class _WindowsLock(_FileLock): def trylock(self): msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) def unlock(self): msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) class _FcntlLock(_FileLock): def trylock(self): fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) def unlock(self): fcntl.lockf(self.lockfile, fcntl.LOCK_UN) if os.name == 'nt': import msvcrt InterProcessLock = _WindowsLock else: import fcntl InterProcessLock = _FcntlLock _semaphores = weakref.WeakValueDictionary() _semaphores_lock = threading.Lock() def _get_lock_path(name, lock_file_prefix, lock_path=None): # NOTE(mikal): the lock name cannot contain directory # separators name = name.replace(os.sep, '_') if lock_file_prefix: sep = '' if lock_file_prefix.endswith('-') else '-' name = '%s%s%s' % (lock_file_prefix, sep, name) local_lock_path = lock_path or CONF.lock_path if not local_lock_path: raise cfg.RequiredOptError('lock_path') return os.path.join(local_lock_path, name) def external_lock(name, lock_file_prefix=None, lock_path=None): LOG.debug('Attempting to grab external lock "%(lock)s"', {'lock': name}) lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path) return InterProcessLock(lock_file_path) def remove_external_lock_file(name, lock_file_prefix=None): """Remove an external lock file when it's not used anymore This will be helpful when we have a lot of lock files """ with internal_lock(name): lock_file_path = _get_lock_path(name, lock_file_prefix) try: os.remove(lock_file_path) except OSError: LOG.info(_LI('Failed to remove file %(file)s'), {'file': lock_file_path}) def internal_lock(name): with _semaphores_lock: try: sem = _semaphores[name] LOG.debug('Using existing semaphore "%s"', name) except KeyError: sem = threading.Semaphore() _semaphores[name] = sem LOG.debug('Created new semaphore "%s"', name) return sem @contextlib.contextmanager def lock(name, lock_file_prefix=None, external=False, lock_path=None): """Context based lock This function yields a `threading.Semaphore` instance (if we don't use eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is True, in which case, it'll yield an InterProcessLock instance. :param lock_file_prefix: The lock_file_prefix argument is used to provide lock files on disk with a meaningful prefix. :param external: The external keyword argument denotes whether this lock should work across multiple processes. This means that if two different workers both run a method decorated with @synchronized('mylock', external=True), only one of them will execute at a time. 
""" int_lock = internal_lock(name) with int_lock: LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name}) try: if external and not CONF.disable_process_locking: ext_lock = external_lock(name, lock_file_prefix, lock_path) with ext_lock: yield ext_lock else: yield int_lock finally: LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name}) def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): """Synchronization decorator. Decorating a method like so:: @synchronized('mylock') def foo(self, *args): ... ensures that only one thread will execute the foo method at a time. Different methods can share the same lock:: @synchronized('mylock') def foo(self, *args): ... @synchronized('mylock') def bar(self, *args): ... This way only one of either foo or bar can be executing at a time. """ def wrap(f): @functools.wraps(f) def inner(*args, **kwargs): try: with lock(name, lock_file_prefix, external, lock_path): LOG.debug('Got semaphore / lock "%(function)s"', {'function': f.__name__}) return f(*args, **kwargs) finally: LOG.debug('Semaphore / lock released "%(function)s"', {'function': f.__name__}) return inner return wrap def synchronized_with_prefix(lock_file_prefix): """Partial object generator for the synchronization decorator. Redefine @synchronized in each project like so:: (in nova/utils.py) from nova.openstack.common import lockutils synchronized = lockutils.synchronized_with_prefix('nova-') (in nova/foo.py) from nova import utils @utils.synchronized('mylock') def bar(self, *args): ... The lock_file_prefix argument is used to provide lock files on disk with a meaningful prefix. """ return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) def main(argv): """Create a dir for locks and pass it to command from arguments If you run this: python -m openstack.common.lockutils python setup.py testr <etc> a temporary directory will be created for all your locks and passed to all your tests in an environment variable. The temporary dir will be deleted afterwards and the return value will be preserved. """ lock_dir = tempfile.mkdtemp() os.environ["NEUTRON_FWAAS_LOCK_PATH"] = lock_dir try: ret_val = subprocess.call(argv[1:]) finally: shutil.rmtree(lock_dir, ignore_errors=True) return ret_val if __name__ == '__main__': sys.exit(main(sys.argv))
citrix-openstack-build/neutron-fwaas
neutron_fwaas/openstack/common/lockutils.py
Python
apache-2.0
10,316
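A minimal sketch of the two locking flavors defined above: external locks serialize across processes once lock_path is configured, while the bare context manager only guards threads within one process.

from neutron_fwaas.openstack.common import lockutils

lockutils.set_defaults('/tmp/neutron-fwaas-locks')


@lockutils.synchronized('rule-update', external=True)
def update_rules():
    pass  # only one process/thread at a time across workers


with lockutils.lock('local-only'):
    pass  # plain threading.Semaphore within this process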
import os
import sys
import csv
import pickle

import numpy as np

from ..density import ProbDensityHistogram


class ProbAbsoluteReflectance(object):
    """Implements the absolute reflectance term p(R_x)."""

    def __init__(self, params):
        self.params = params
        self._load()

    def cost(self, r_nz):
        if self.params.abs_reflectance_weight:
            return self.params.abs_reflectance_weight * \
                (-self.density.logprob(np.log(r_nz)))
        else:
            return 0

    def _load(self):
        if self.params.logging:
            print("loading reflectances...")

        cur_dir = os.path.dirname(os.path.abspath(__file__))
        data_filename = os.path.join(cur_dir, 'prob_abs_r.dat')
        if not os.path.exists(data_filename):
            # Train the histogram density from the diffuse albedos in
            # bsdfs.csv, keeping uncolored reflections whose three channels
            # all exceed 1e-4.
            rows = []
            to_id = {}
            with open(os.path.join(cur_dir, 'bsdfs.csv'), 'rb') as csvfile:
                first = True
                for row in csv.reader(csvfile):
                    if first:
                        to_id = {name: i for i, name in enumerate(row)}
                        first = False
                    else:
                        if row[to_id['colored_reflection']] == 'False':
                            r = float(row[to_id['rho_d_r']])
                            g = float(row[to_id['rho_d_g']])
                            b = float(row[to_id['rho_d_b']])
                            if r > 1e-4 and g > 1e-4 and b > 1e-4:
                                rows.append([r, g, b])

            data_raw = np.array(rows)
            data = np.clip(np.log(data_raw), np.log(1e-4), 0)
            self.density = ProbDensityHistogram()
            self.density.train(data, bins=100, bandwidth=3)
            print('dump:', data_filename)
            with open(data_filename, "wb") as f:
                pickle.dump(self.density, f)
        else:
            # make sure that 'bell2014.density' is on the path so that it
            # unpickles correctly
            path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
            if path not in sys.path:
                sys.path.append(path)
            print('load:', data_filename)
            with open(data_filename, "rb") as f:
                self.density = pickle.load(f)

        if self.params.logging:
            print("loaded reflectances")
tinghuiz/learn-reflectance
bell2014/energy/prob_abs_r.py
Python
mit
4,912
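A sketch of evaluating the term above. The `_Params` class is a hypothetical stand-in; the real solver passes its own parameter object, which only needs `abs_reflectance_weight` and `logging` here.

import numpy as np

from bell2014.energy.prob_abs_r import ProbAbsoluteReflectance


class _Params(object):  # hypothetical stand-in for the solver's parameters
    abs_reflectance_weight = 1.0
    logging = False


prob_r = ProbAbsoluteReflectance(_Params())
r_nz = np.clip(np.random.rand(100, 3), 1e-4, 1.0)  # nonzero linear reflectances
energy = prob_r.cost(r_nz)  # weighted negative log-probability under p(R_x)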
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License version 3 as published by the
Free Software Foundation.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""

__author__ = "Rubens Pinheiro Gonçalves Cavalcante"
__date__ = "08/05/13 19:18"
__licence__ = "GPLv3"
__email__ = "rubenspgcavalcante@gmail.com"

from core.event import *
from core.controller import *


class CPUSpinnerController(Controller):
    """Drives the main loop, emitting TickEvents until a QuitEvent arrives."""

    def __init__(self):
        Controller.__init__(self)
        self.keepGoing = True
        self.bind(QuitEvent(), self.stop)

    def defaultAction(self):
        pass

    def run(self):
        self.trigger(AppStartEvent())
        while self.keepGoing:
            self.trigger(TickEvent())

    def stop(self, event):
        # This stops the while loop in run() from continuing.
        self.keepGoing = False
rubenspgcavalcante/pygameoflife
controllers/cpuspinner_controller.py
Python
gpl-3.0
1,197
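A wiring sketch for the controller above, assuming the bind/trigger event mechanics visible in the class; the keyboard-controller side is hypothetical.

from controllers.cpuspinner_controller import CPUSpinnerController
from core.event import QuitEvent

spinner = CPUSpinnerController()
# Elsewhere, e.g. in a keyboard controller's escape handler:
#     spinner.trigger(QuitEvent())  # invokes the bound stop(), ending the loop
spinner.run()  # emits AppStartEvent, then TickEvents until keepGoing is False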
"""vlan_pool PK to bigint Revision ID: e06576b2ea9e Revises: 9089fa811a2b Create Date: 2017-07-21 16:34:50.005560 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'e06576b2ea9e' down_revision = None branch_labels = ('hil.ext.network_allocators.vlan_pool',) # pylint: disable=missing-docstring def upgrade(): op.alter_column('vlan', 'id', existing_type=sa.Integer(), type_=sa.BIGINT()) def downgrade(): op.alter_column('vlan', 'id', existing_type=sa.BIGINT(), type_=sa.INTEGER())
SahilTikale/haas
hil/ext/network_allocators/migrations/vlan_pool/e06576b2ea9e_vlan_pool_pk_to_bigint.py
Python
apache-2.0
631
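The revision above is applied through Alembic as usual; a minimal programmatic sketch, assuming an alembic.ini configured for HIL's migration branches.

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")
command.upgrade(cfg, "e06576b2ea9e")  # runs upgrade(): vlan.id becomes BIGINT
# downgrading past this revision runs downgrade() and restores INTEGER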
from ppillar import PublicPillar from contextlib import contextmanager import os import shutil import stat import tempfile import unittest import yaml try: from unittest import skipIf except ImportError: # Python 2.6 from unittest2 import skipIf @contextmanager def ignored(*exceptions): try: yield except exceptions: pass class SecurePillarTest(unittest.TestCase): def setUp(self): self.enc_ppillar = PublicPillar(os.path.join('test-data', 'key2048.pub')) self.dec_ppillar = PublicPillar(os.path.join('test-data', 'key2048.pem')) def test_ppillar(self): input_values = [ # Plain key-value dict 'secretstuff', # Long value over several lines ('secret'*100 + '\n')*20, # Random binary data str([os.urandom(10) for i in range(10)]), ] for input_value in input_values: encrypted = self.enc_ppillar.encrypt(input_value) plaintext = { 'secret_data': input_value, } ciphertext = { 'secret_data': encrypted, } self.assertEqual(self.dec_ppillar.decrypt_dict(ciphertext), plaintext) class ShortKeyTest(unittest.TestCase): def setUp(self): self.ppillar = PublicPillar(os.path.join('test-data', 'key1024.pem')) def test_encrypt_long_string(self): data = 'secret'*100 self.assertRaises(ValueError, self.ppillar.encrypt, data) class EncryptedPrivateKeyTest(unittest.TestCase): def setUp(self): self.ppillar = PublicPillar(os.path.join('test-data', 'key2048enc.pem'), passphrase='test') def test_encrypted_private_key(self): expected_plaintext = { 'database': { 'password': 'supersecretdbpassword', }, 'webserver': { 'secret_key': 'signstuffwiththiskey', } } with open(os.path.join('test-data', 'ciphertext.yml')) as fh: enc_data = yaml.load(fh) decrypted_plaintext = self.ppillar.decrypt_dict(enc_data) self.assertEqual(decrypted_plaintext, expected_plaintext) class WrongKeyTest(unittest.TestCase): def test_wrong_pass_encrypted_key(self): self.assertRaises(ValueError, PublicPillar, os.path.join('test-data', 'key2048enc.pem'), passphrase='foo') @skipIf(os.name == 'nt', 'File permission tests are not run on windows due to ' 'an unsupported security model') class FilePermissionsTest(unittest.TestCase): def setUp(self): self.ppillar = PublicPillar(os.path.join('test-data', 'key2048.pem')) self.target_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.target_dir, ignore_errors=True) def _assert_all_files_in_dir_are_0600(self, directory): at_least_one_result = False for dirpath, dirnames, filenames in os.walk(self.target_dir): for filename in filenames: st_mode = os.stat(os.path.join(self.target_dir, dirpath, filename)).st_mode mode = stat.S_IMODE(st_mode) self.assertEqual(mode, 0o600) at_least_one_result = True self.assertTrue(at_least_one_result) def test_correct_decrypted_permissions(self): source_dir = os.path.join('test-data', 'encrypted_dir') self.ppillar.decrypt_directory(source_dir, self.target_dir) self._assert_all_files_in_dir_are_0600(self.target_dir) def test_correct_permissions_on_existing_file(self): source_dir = os.path.join('test-data', 'encrypted_dir') word_readable_file = os.path.join(self.target_dir, 'database.yml') with open(word_readable_file, 'w') as fh: pass os.chmod(word_readable_file, 0o644) self.ppillar.decrypt_directory(source_dir, self.target_dir) self._assert_all_files_in_dir_are_0600(self.target_dir) def test_does_not_allow_existing_file_descriptors_to_read_contents(self): source_dir = os.path.join('test-data', 'encrypted_dir') word_readable_file = os.path.join(self.target_dir, 'database.yml') with open(word_readable_file, 'wb') as fh: fh.write(b'prepillarcontents') os.chmod(word_readable_file, 
0o644) word_readable_file_fd = open(word_readable_file, 'rb', 0) self.ppillar.decrypt_directory(source_dir, self.target_dir) self._assert_all_files_in_dir_are_0600(self.target_dir) # When the file already existed, it should have created a new file descriptor # in the target location, which means that the contents we can read from the old # one is not the sensitive data in the new file self.assertEqual(word_readable_file_fd.read(), b'prepillarcontents')
thusoy/public-pillar
test_ppillar.py
Python
mit
4,849
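For orientation, an end-to-end sketch of the API the tests above exercise, reusing the same key fixtures from test-data/.

from ppillar import PublicPillar

enc = PublicPillar('test-data/key2048.pub')
dec = PublicPillar('test-data/key2048.pem')

ciphertext = {'db_password': enc.encrypt('hunter2')}
assert dec.decrypt_dict(ciphertext) == {'db_password': 'hunter2'}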
from __future__ import division import numpy as np import scipy.sparse as sp from scipy.constants import epsilon_0 from ...utils.code_utils import deprecate_class from ...fields import TimeFields from ...utils import mkvc, sdiag, Zero from ..utils import omega class FieldsTDEM(TimeFields): """ Fancy Field Storage for a TDEM simulation. Only one field type is stored for each problem, the rest are computed. The fields obejct acts like an array and is indexed by .. code-block:: python f = problem.fields(m) e = f[source_list,'e'] b = f[source_list,'b'] If accessing all sources for a given field, use the :code:`:` .. code-block:: python f = problem.fields(m) e = f[:,'e'] b = f[:,'b'] The array returned will be size (nE or nF, nSrcs :math:`\\times` nFrequencies) """ knownFields = {} dtype = float def _GLoc(self, fieldType): """Grid location of the fieldType""" return self.aliasFields[fieldType][1] def _eDeriv(self, tInd, src, dun_dm_v, v, adjoint=False): if adjoint is True: return ( self._eDeriv_u(tInd, src, v, adjoint), self._eDeriv_m(tInd, src, v, adjoint), ) return self._eDeriv_u(tInd, src, dun_dm_v) + self._eDeriv_m(tInd, src, v) def _bDeriv(self, tInd, src, dun_dm_v, v, adjoint=False): if adjoint is True: return ( self._bDeriv_u(tInd, src, v, adjoint), self._bDeriv_m(tInd, src, v, adjoint), ) return self._bDeriv_u(tInd, src, dun_dm_v) + self._bDeriv_m(tInd, src, v) def _dbdtDeriv(self, tInd, src, dun_dm_v, v, adjoint=False): if adjoint is True: return ( self._dbdtDeriv_u(tInd, src, v, adjoint), self._dbdtDeriv_m(tInd, src, v, adjoint), ) return self._dbdtDeriv_u(tInd, src, dun_dm_v) + self._dbdtDeriv_m(tInd, src, v) def _hDeriv(self, tInd, src, dun_dm_v, v, adjoint=False): if adjoint is True: return ( self._hDeriv_u(tInd, src, v, adjoint), self._hDeriv_m(tInd, src, v, adjoint), ) return self._hDeriv_u(tInd, src, dun_dm_v) + self._hDeriv_m(tInd, src, v) def _dhdtDeriv(self, tInd, src, dun_dm_v, v, adjoint=False): if adjoint is True: return ( self._dhdtDeriv_u(tInd, src, v, adjoint), self._dhdtDeriv_m(tInd, src, v, adjoint), ) return self._dhdtDeriv_u(tInd, src, dun_dm_v) + self._dhdtDeriv_m(tInd, src, v) def _jDeriv(self, tInd, src, dun_dm_v, v, adjoint=False): if adjoint is True: return ( self._jDeriv_u(tInd, src, v, adjoint), self._jDeriv_m(tInd, src, v, adjoint), ) return self._jDeriv_u(tInd, src, dun_dm_v) + self._jDeriv_m(tInd, src, v) class FieldsDerivativesEB(FieldsTDEM): """ A fields object for satshing derivs in the EB formulation """ knownFields = { "bDeriv": "F", "eDeriv": "E", "hDeriv": "F", "jDeriv": "E", "dbdtDeriv": "F", "dhdtDeriv": "F", } class FieldsDerivativesHJ(FieldsTDEM): """ A fields object for satshing derivs in the HJ formulation """ knownFields = { "bDeriv": "E", "eDeriv": "F", "hDeriv": "E", "jDeriv": "F", "dbdtDeriv": "E", "dhdtDeriv": "E", } class Fields3DMagneticFluxDensity(FieldsTDEM): """Field Storage for a TDEM simulation.""" knownFields = {"bSolution": "F"} aliasFields = { "b": ["bSolution", "F", "_b"], "h": ["bSolution", "F", "_h"], "e": ["bSolution", "E", "_e"], "j": ["bSolution", "E", "_j"], "dbdt": ["bSolution", "F", "_dbdt"], "dhdt": ["bSolution", "F", "_dhdt"], } def startup(self): self._times = self.simulation.times self._MeSigma = self.simulation.MeSigma self._MeSigmaI = self.simulation.MeSigmaI self._MeSigmaDeriv = self.simulation.MeSigmaDeriv self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv self._edgeCurl = self.simulation.mesh.edgeCurl self._MfMui = self.simulation.MfMui self._timeMesh = self.simulation.time_mesh def _TLoc(self, fieldType): 
return "N" def _b(self, bSolution, source_list, tInd): return bSolution def _bDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): return dun_dm_v def _bDeriv_m(self, tInd, src, v, adjoint=False): return Zero() def _dbdt(self, bSolution, source_list, tInd): # self._timeMesh.faceDiv dbdt = -self._edgeCurl * self._e(bSolution, source_list, tInd) for i, src in enumerate(source_list): s_m = src.s_m(self.simulation, self._times[tInd]) dbdt[:, i] = dbdt[:, i] + s_m return dbdt def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint is True: return -self._eDeriv_u(tInd, src, self._edgeCurl.T * dun_dm_v, adjoint) return -(self._edgeCurl * self._eDeriv_u(tInd, src, dun_dm_v)) def _dbdtDeriv_m(self, tInd, src, v, adjoint=False): if adjoint is True: return -(self._eDeriv_m(tInd, src, self._edgeCurl.T * v, adjoint)) return -( self._edgeCurl * self._eDeriv_m(tInd, src, v) ) # + src.s_mDeriv() assuming src doesn't have deriv for now def _e(self, bSolution, source_list, tInd): e = self._MeSigmaI * (self._edgeCurl.T * (self._MfMui * bSolution)) for i, src in enumerate(source_list): s_e = src.s_e(self.simulation, self._times[tInd]) e[:, i] = e[:, i] - self._MeSigmaI * s_e return e def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint is True: return self._MfMui.T * (self._edgeCurl * (self._MeSigmaI.T * dun_dm_v)) return self._MeSigmaI * (self._edgeCurl.T * (self._MfMui * dun_dm_v)) def _eDeriv_m(self, tInd, src, v, adjoint=False): _, s_e = src.eval(self.simulation, self._times[tInd]) bSolution = self[[src], "bSolution", tInd].flatten() _, s_eDeriv = src.evalDeriv(self._times[tInd], self, adjoint=adjoint) if adjoint is True: return self._MeSigmaIDeriv( -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint ) - s_eDeriv(self._MeSigmaI.T * v) return self._MeSigmaIDeriv( -s_e + self._edgeCurl.T * (self._MfMui * bSolution), v, adjoint ) - self._MeSigmaI * s_eDeriv(v) def _j(self, hSolution, source_list, tInd): return self.simulation.MeI * ( self._MeSigma * self._e(hSolution, source_list, tInd) ) def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint: return self._eDeriv_u( tInd, src, self._MeSigma.T * (self.simulation.MeI.T * dun_dm_v), adjoint=True, ) return self.simulation.MeI * ( self._MeSigma * self._eDeriv_u(tInd, src, dun_dm_v) ) def _jDeriv_m(self, tInd, src, v, adjoint=False): e = self[src, "e", tInd] if adjoint: w = self.simulation.MeI.T * v return self._MeSigmaDeriv(e).T * w + self._eDeriv_m( tInd, src, self._MeSigma.T * w, adjoint=True ) return self.simulation.MeI * ( self._MeSigmaDeriv(e) * v + self._MeSigma * self._eDeriv_m(tInd, src, v) ) def _h(self, hSolution, source_list, tInd): return self.simulation.MfI * ( self._MfMui * self._b(hSolution, source_list, tInd) ) def _hDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint: return self._bDeriv_u( tInd, src, self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), adjoint=True, ) return self.simulation.MfI * (self._MfMui * self._bDeriv_u(tInd, src, dun_dm_v)) def _hDeriv_m(self, tInd, src, v, adjoint=False): if adjoint: return self._bDeriv_m( tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True ) return self.simulation.MfI * (self._MfMui * self._bDeriv_m(tInd, src, v)) def _dhdt(self, hSolution, source_list, tInd): return self.simulation.MfI * ( self._MfMui * self._dbdt(hSolution, source_list, tInd) ) def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint: return self._dbdtDeriv_u( tInd, src, self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), adjoint=True, ) 
return self.simulation.MfI * ( self._MfMui * self._dbdtDeriv_u(tInd, src, dun_dm_v) ) def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): if adjoint: return self._dbdtDeriv_m( tInd, src, self._MfMui.T * (self.simulation.MfI.T * v), adjoint=True ) return self.simulation.MfI * (self._MfMui * self._dbdtDeriv_m(tInd, src, v)) class Fields3DElectricField(FieldsTDEM): """Fancy Field Storage for a TDEM simulation.""" knownFields = {"eSolution": "E"} aliasFields = { "e": ["eSolution", "E", "_e"], "j": ["eSolution", "E", "_j"], "b": ["eSolution", "F", "_b"], # 'h': ['eSolution', 'F', '_h'], "dbdt": ["eSolution", "F", "_dbdt"], "dhdt": ["eSolution", "F", "_dhdt"], } def startup(self): self._times = self.simulation.times self._MeSigma = self.simulation.MeSigma self._MeSigmaI = self.simulation.MeSigmaI self._MeSigmaDeriv = self.simulation.MeSigmaDeriv self._MeSigmaIDeriv = self.simulation.MeSigmaIDeriv self._edgeCurl = self.simulation.mesh.edgeCurl self._MfMui = self.simulation.MfMui def _TLoc(self, fieldType): return "N" def _e(self, eSolution, source_list, tInd): return eSolution def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): return dun_dm_v def _eDeriv_m(self, tInd, src, v, adjoint=False): return Zero() def _dbdt(self, eSolution, source_list, tInd): s_m = np.zeros((self.mesh.nF, len(source_list))) for i, src in enumerate(source_list): s_m_src = src.s_m(self.simulation, self._times[tInd]) s_m[:, i] = s_m[:, i] + s_m_src return s_m - self._edgeCurl * eSolution def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint: return -self._edgeCurl.T * dun_dm_v return -self._edgeCurl * dun_dm_v def _dbdtDeriv_m(self, tInd, src, v, adjoint=False): # s_mDeriv = src.s_mDeriv( # self._times[tInd], self, adjoint=adjoint # ) return Zero() # assumes source doesn't depend on model def _b(self, eSolution, source_list, tInd): """ Integrate _db_dt using rectangles """ raise NotImplementedError( "To obtain b-fields, please use Simulation3DMagneticFluxDensity" ) # dbdt = self._dbdt(eSolution, source_list, tInd) # dt = self.simulation.time_mesh.hx # # assume widths of "ghost cells" same on either end # dtn = np.hstack([dt[0], 0.5*(dt[1:] + dt[:-1]), dt[-1]]) # return dtn[tInd] * dbdt # # raise NotImplementedError def _j(self, eSolution, source_list, tInd): return self.simulation.MeI * ( self._MeSigma * self._e(eSolution, source_list, tInd) ) def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint: return self._eDeriv_u( tInd, src, self._MeSigma.T * (self.simulation.MeI.T * dun_dm_v), adjoint=True, ) return self.simulation.MeI * ( self._MeSigma * self._eDeriv_u(tInd, src, dun_dm_v) ) def _jDeriv_m(self, tInd, src, v, adjoint=False): e = self[src, "e", tInd] if adjoint: w = self.simulation.MeI.T * v return self._MeSigmaDeriv(e).T * w + self._eDeriv_m( tInd, src, self._MeSigma.T * w, adjoint=True ) return self.simulation.MeI * ( self._MeSigmaDeriv(e) * v + self._MeSigma * self._eDeriv_m(tInd, src, v) ) def _dhdt(self, eSolution, source_list, tInd): return self.simulation.MfI * ( self._MfMui * self._dbdt(eSolution, source_list, tInd) ) def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint: return self._dbdtDeriv_u( tInd, src, self._MfMui.T * (self.simulation.MfI.T * dun_dm_v), adjoint=True, ) return self.simulation.MfI * ( self._MfMui * self._dbdtDeriv_u(tInd, src, dun_dm_v) ) def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): if adjoint: return self._dbdtDeriv_m( tInd, src, self._MfMui.T * (self.simulation.MfI.T * v) ) return self.simulation.MfI * (self._MfMui * 
self._dbdtDeriv_m(tInd, src, v)) class Fields3DMagneticField(FieldsTDEM): """Fancy Field Storage for a TDEM simulation.""" knownFields = {"hSolution": "E"} aliasFields = { "h": ["hSolution", "E", "_h"], "b": ["hSolution", "E", "_b"], "dhdt": ["hSolution", "E", "_dhdt"], "dbdt": ["hSolution", "E", "_dbdt"], "j": ["hSolution", "F", "_j"], "e": ["hSolution", "F", "_e"], "charge": ["hSolution", "CC", "_charge"], } def startup(self): self._times = self.simulation.times self._edgeCurl = self.simulation.mesh.edgeCurl self._MeMuI = self.simulation.MeMuI self._MeMu = self.simulation.MeMu self._MfRho = self.simulation.MfRho self._MfRhoDeriv = self.simulation.MfRhoDeriv def _TLoc(self, fieldType): # if fieldType in ['h', 'j']: return "N" # else: # raise NotImplementedError def _h(self, hSolution, source_list, tInd): return hSolution def _hDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): return dun_dm_v def _hDeriv_m(self, tInd, src, v, adjoint=False): return Zero() def _dhdt(self, hSolution, source_list, tInd): C = self._edgeCurl MeMuI = self._MeMuI MfRho = self._MfRho dhdt = -MeMuI * (C.T * (MfRho * (C * hSolution))) for i, src in enumerate(source_list): s_m, s_e = src.eval(self.simulation, self._times[tInd]) dhdt[:, i] = MeMuI * (C.T * MfRho * s_e + s_m) + dhdt[:, i] return dhdt def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): C = self._edgeCurl MeMuI = self._MeMuI MfRho = self._MfRho if adjoint: return -C.T * (MfRho.T * (C * (MeMuI * dun_dm_v))) return -MeMuI * (C.T * (MfRho * (C * dun_dm_v))) def _dhdtDeriv_m(self, tInd, src, v, adjoint=False): C = self._edgeCurl MeMuI = self._MeMuI MfRho = self._MfRho MfRhoDeriv = self._MfRhoDeriv hSolution = self[[src], "hSolution", tInd].flatten() s_e = src.s_e(self.simulation, self._times[tInd]) if adjoint: return -MfRhoDeriv(C * hSolution - s_e, (C * (MeMuI * v)), adjoint) return -MeMuI * (C.T * (MfRhoDeriv(C * hSolution - s_e, v, adjoint))) def _j(self, hSolution, source_list, tInd): s_e = np.zeros((self.mesh.nF, len(source_list))) for i, src in enumerate(source_list): s_e_src = src.s_e(self.simulation, self._times[tInd]) s_e[:, i] = s_e[:, i] + s_e_src return self._edgeCurl * hSolution - s_e def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint: return self._edgeCurl.T * dun_dm_v return self._edgeCurl * dun_dm_v def _jDeriv_m(self, tInd, src, v, adjoint=False): return Zero() # assumes the source doesn't depend on the model def _b(self, hSolution, source_list, tInd): h = self._h(hSolution, source_list, tInd) return self.simulation.MeI * (self._MeMu * h) def _bDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint: return self._hDeriv_u( tInd, src, self._MeMu.T * (self.simulation.MeI.T * dun_dm_v), adjoint=adjoint, ) return self.simulation.MeI * (self._MeMu * self._hDeriv_u(tInd, src, dun_dm_v)) def _bDeriv_m(self, tInd, src, v, adjoint=False): if adjoint: return self._hDeriv_m( tInd, src, self._MeMu.T * (self.simulation.MeI.T * v), adjoint=adjoint ) return self.simulation.MeI * (self._MeMu * self._hDeriv_m(tInd, src, v)) def _dbdt(self, hSolution, source_list, tInd): dhdt = self._dhdt(hSolution, source_list, tInd) return self.simulation.MeI * (self._MeMu * dhdt) def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False): if adjoint: return self._dhdtDeriv_u( tInd, src, self._MeMu.T * (self.simulation.MeI.T * dun_dm_v), adjoint=adjoint, ) return self.simulation.MeI * ( self._MeMu * self._dhdtDeriv_u(tInd, src, dun_dm_v) ) def _dbdtDeriv_m(self, tInd, src, v, adjoint=False): if adjoint: return self._dhdtDeriv_m( tInd, src, 
                self._MeMu.T * (self.simulation.MeI.T * v), adjoint=adjoint
            )
        return self.simulation.MeI * (self._MeMu * self._dhdtDeriv_m(tInd, src, v))

    def _e(self, hSolution, source_list, tInd):
        return self.simulation.MfI * (
            self._MfRho * self._j(hSolution, source_list, tInd)
        )

    def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint:
            return self._jDeriv_u(
                tInd,
                src,
                self._MfRho.T * (self.simulation.MfI.T * dun_dm_v),
                adjoint=True,
            )
        return self.simulation.MfI * (self._MfRho * self._jDeriv_u(tInd, src, dun_dm_v))

    def _eDeriv_m(self, tInd, src, v, adjoint=False):
        j = mkvc(self[src, "j", tInd])
        if adjoint is True:
            return self._MfRhoDeriv(
                j, self.simulation.MfI.T * v, adjoint
            ) + self._jDeriv_m(tInd, src, self._MfRho * v)
        return self.simulation.MfI * (
            self._MfRhoDeriv(j, v) + self._MfRho * self._jDeriv_m(tInd, src, v)
        )

    def _charge(self, hSolution, source_list, tInd):
        vol = sdiag(self.simulation.mesh.vol)
        return (
            epsilon_0
            * vol
            * (self.simulation.mesh.faceDiv * self._e(hSolution, source_list, tInd))
        )


class Fields3DCurrentDensity(FieldsTDEM):
    """Fancy Field Storage for a TDEM simulation."""

    knownFields = {"jSolution": "F"}
    aliasFields = {
        "dhdt": ["jSolution", "E", "_dhdt"],
        "dbdt": ["jSolution", "E", "_dbdt"],
        "j": ["jSolution", "F", "_j"],
        "e": ["jSolution", "F", "_e"],
        "charge": ["jSolution", "CC", "_charge"],
        "charge_density": ["jSolution", "CC", "_charge_density"],
    }

    def startup(self):
        self._times = self.simulation.times
        self._edgeCurl = self.simulation.mesh.edgeCurl
        self._MeMuI = self.simulation.MeMuI
        self._MfRho = self.simulation.MfRho
        self._MfRhoDeriv = self.simulation.MfRhoDeriv

    def _TLoc(self, fieldType):
        # if fieldType in ['h', 'j']:
        return "N"

    def _j(self, jSolution, source_list, tInd):
        return jSolution

    def _jDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        return dun_dm_v

    def _jDeriv_m(self, tInd, src, v, adjoint=False):
        return Zero()

    def _h(self, jSolution, source_list, tInd):
        raise NotImplementedError(
            "Please use Simulation3DMagneticField to get h-fields"
        )

    def _dhdt(self, jSolution, source_list, tInd):
        C = self._edgeCurl
        MfRho = self._MfRho
        MeMuI = self._MeMuI

        dhdt = -MeMuI * (C.T * (MfRho * jSolution))
        for i, src in enumerate(source_list):
            s_m = src.s_m(self.simulation, self.simulation.times[tInd])
            dhdt[:, i] = MeMuI * s_m + dhdt[:, i]
        return dhdt

    def _dhdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        C = self._edgeCurl
        MfRho = self._MfRho
        MeMuI = self._MeMuI

        if adjoint is True:
            return -MfRho.T * (C * (MeMuI.T * dun_dm_v))
        return -MeMuI * (C.T * (MfRho * dun_dm_v))

    def _dhdtDeriv_m(self, tInd, src, v, adjoint=False):
        jSolution = self[[src], "jSolution", tInd].flatten()
        C = self._edgeCurl
        MeMuI = self._MeMuI

        if adjoint is True:
            return -self._MfRhoDeriv(jSolution, C * (MeMuI * v), adjoint)
        return -MeMuI * (C.T * (self._MfRhoDeriv(jSolution, v)))

    def _e(self, jSolution, source_list, tInd):
        return self.simulation.MfI * (
            self._MfRho * self._j(jSolution, source_list, tInd)
        )

    def _eDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        if adjoint is True:
            return self._MfRho.T * (self.simulation.MfI.T * dun_dm_v)
        return self.simulation.MfI * (self._MfRho * dun_dm_v)

    def _eDeriv_m(self, tInd, src, v, adjoint=False):
        jSolution = mkvc(self[src, "jSolution", tInd])
        if adjoint:
            return self._MfRhoDeriv(jSolution, self.simulation.MfI.T * v, adjoint)
        return self.simulation.MfI * self._MfRhoDeriv(jSolution, v)

    def _charge(self, jSolution, source_list, tInd):
        vol = sdiag(self.simulation.mesh.vol)
        return vol * self._charge_density(jSolution, source_list, tInd)

    def _charge_density(self, jSolution, source_list, tInd):
        return epsilon_0 * (
            self.simulation.mesh.faceDiv * self._e(jSolution, source_list, tInd)
        )

    def _dbdt(self, jSolution, source_list, tInd):
        dhdt = mkvc(self._dhdt(jSolution, source_list, tInd))
        return self.simulation.MeI * (self.simulation.MeMu * dhdt)

    def _dbdtDeriv_u(self, tInd, src, dun_dm_v, adjoint=False):
        # dhdt = mkvc(self[src, 'dhdt', tInd])
        if adjoint:
            return self._dhdtDeriv_u(
                tInd,
                src,
                self.simulation.MeMu.T * (self.simulation.MeI.T * dun_dm_v),
                adjoint,
            )
        return self.simulation.MeI * (
            self.simulation.MeMu * self._dhdtDeriv_u(tInd, src, dun_dm_v)
        )

    def _dbdtDeriv_m(self, tInd, src, v, adjoint=False):
        if adjoint:
            return self._dhdtDeriv_m(
                tInd, src, self.simulation.MeMu.T * (self.simulation.MeI.T * v), adjoint
            )
        return self.simulation.MeI * (
            self.simulation.MeMu * self._dhdtDeriv_m(tInd, src, v)
        )


############
# Deprecated
############


@deprecate_class(removal_version="0.16.0", error=True)
class Fields_Derivs_eb(FieldsDerivativesEB):
    pass


@deprecate_class(removal_version="0.16.0", error=True)
class Fields_Derivs_hj(FieldsDerivativesHJ):
    pass


@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_b(Fields3DMagneticFluxDensity):
    pass


@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_e(Fields3DElectricField):
    pass


@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_h(Fields3DMagneticField):
    pass


@deprecate_class(removal_version="0.16.0", error=True)
class Fields3D_j(Fields3DCurrentDensity):
    pass
simpeg/simpeg
SimPEG/electromagnetics/time_domain/fields.py
Python
mit
24,177
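The fields classes above all route derived quantities through an aliasFields table mapping an alias to [solution_name, location, method_name]. Below is a minimal standalone sketch of that dispatch idea, not SimPEG's actual Fields implementation; the MiniFields class and the uniform-resistivity value are illustrative assumptions.

# Minimal sketch of the aliasFields dispatch pattern (illustration only,
# not the real SimPEG Fields class).
import numpy as np


class MiniFields:
    knownFields = {"jSolution": "F"}
    # alias -> [solution it derives from, grid location, computing method]
    aliasFields = {"e": ["jSolution", "F", "_e"]}

    def __init__(self, solutions):
        # solutions: dict mapping a known field name to its array
        self._solutions = solutions

    def __getitem__(self, name):
        if name in self.knownFields:
            return self._solutions[name]
        solution_name, _loc, method_name = self.aliasFields[name]
        # Derived fields are computed on demand from the stored solution.
        return getattr(self, method_name)(self._solutions[solution_name])

    def _e(self, j):
        rho = 2.0  # hypothetical uniform resistivity
        return rho * j


fields = MiniFields({"jSolution": np.ones(4)})
print(fields["e"])  # -> [2. 2. 2. 2.]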
"""Commands: "@[botname] XXXXX".""" import logging import random from abc import ABC from twisted.internet import reactor from bot.commands.abstract.command import Command from bot.utilities.permission import Permission class Speech(Command, ABC): """Natural language.""" perm = Permission.User reloadable = False def __init__(self, bot): """Define self.chatbot here""" self.chatbot = Chatbot() def match(self, bot, user, msg, tag_info): """Match if the bot is tagged.""" return bot.config.nickname in msg.lower() def run(self, bot, user, msg, tag_info): """Send message to cleverbot only if no other command got triggered.""" if not bot.antispeech: msg = msg.lower() msg = msg.replace("@", "") msg = msg.replace(bot.config.nickname, "") """Get reply in extra thread, so bot doesnt pause while waiting for the reply. Problem: Only replies after someone else wrote something in chat.""" reactor.callInThread(self.answer, bot, user, msg) def answer(self, bot, user, msg): """Answer the message of a user.""" output = self.chatbot.get_reply(msg, user) if output is None: logging.warning( "WARNING: No chatbot ({}) reply retrieved. Cannot reply.".format( self.chatbot.get_name() ) ) if not random.randint(0, 3): output = (output or "") + " monkaS" bot.write("@" + user + " " + output) class Chatbot(ABC): """Class that replies to messages.""" def __init__(self): """Initialize the command.""" raise NotImplementedError def get_reply(self, message, name): """Get a reply to a message. Keeps different conversations for different names. Should usually be called in a different thread, since this might take some time. """ raise NotImplementedError def get_name(self): """Returns name of this chatbot.""" raise NotImplementedError
NMisko/monkalot
bot/commands/abstract/speech.py
Python
mit
2,100
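A minimal hypothetical implementation of the Chatbot interface above, to show how a subclass would fill in the three abstract methods. This echo bot is purely illustrative; monkalot's real chatbots talk to external services.

class EchoChatbot(Chatbot):
    """Toy chatbot that echoes messages back (illustration only)."""

    def __init__(self):
        # Override the abstract __init__; keep one conversation log per name.
        self.conversations = {}

    def get_reply(self, message, name):
        # Track per-user conversations, as the interface docstring suggests.
        self.conversations.setdefault(name, []).append(message)
        return "you said: " + message

    def get_name(self):
        return "echo"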
# -*- coding: utf-8 -*-
"""hipnotify"""
from .hipnotify import Room

__author__ = 'Akira Chiku'
__email__ = 'akira.chiku@gmail.com'
__version__ = '1.0.9'

__all__ = [
    'Room'
]
achiku/hipnotify
hipnotify/__init__.py
Python
isc
181
'''
'funcassociateinfo.py' sets up the command line arguments for the
'genecentric-fainfo' program.
'''
import sys

import bpm
import argparse

parser = argparse.ArgumentParser(
    description='Query Funcassociate for information to use with \'go-enrich\'',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
aa = parser.add_argument
aa('command', type=str, choices=['species', 'namespaces'],
   metavar='QUERY_COMMAND',
   help='The \'species\' command will ask Funcassociate for a list of '
        'available species to perform GO enrichment with. The \'namespaces\' '
        'command, with a corresponding species name, will ask Funcassociate '
        'for a list of available namespaces to use with the species '
        'specified.')
aa('species', type=str, nargs='?', default=None,
   metavar='QUERY_SPECIES',
   help='The species to be used when querying for available namespaces. '
        'This should not be used with the \'species\' command.')
aa('-v', '--verbose', dest='verbose', action='store_true',
   help='If set, more output will be shown.')

conf = parser.parse_args()

if conf.command == 'namespaces' and conf.species is None:
    print >> sys.stderr, \
        'You must provide a species when using the \'namespace\' command.'
    sys.exit(1)

# Set the global conf variable
bpm.conf = conf
BurntSushi/genecentric
bpm/cmdargs/funcassociateinfo.py
Python
gpl-2.0
1,332
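A quick sketch of how the parser defined above resolves the two query commands, using argparse's standard behavior; the species string is illustrative only.

# Illustrative only: the module calls parse_args() on sys.argv at import
# time, but the same parser resolves explicit argument lists like this.
args = parser.parse_args(['species'])
# args.command == 'species', args.species is None

args = parser.parse_args(['namespaces', 'Saccharomyces cerevisiae'])
# args.command == 'namespaces', args.species == 'Saccharomyces cerevisiae'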
from ConfigParser import ConfigParser

config = ConfigParser()
config.read('spacephone.conf')
SpacePhone/spacephone.org-python
spacephone/config.py
Python
gpl-2.0
94
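Typical follow-up usage of the module-level config object above, via the standard-library ConfigParser API; the section and option names are hypothetical, since spacephone.conf itself is not shown.

# Hypothetical section/option names -- spacephone.conf is not part of
# this module, so these are assumptions for illustration.
host = config.get('sip', 'host')
port = config.getint('sip', 'port')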
from django.db.transaction import non_atomic_requests
from django.utils.translation import (
    ugettext as _, ugettext_lazy as _lazy, pgettext_lazy)

import jingo
import jinja2

from olympia import amo
from olympia.amo.helpers import urlparams
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import render


@jinja2.contextfunction
def install_button(context, addon, version=None, show_contrib=True,
                   show_warning=True, src='', collection=None, size='',
                   detailed=False, mobile=False, impala=False,
                   latest_beta=False):
    """
    If version isn't given, we use the latest version. You can set
    latest_beta parameter to use latest beta version instead.
    """
    assert not (version and latest_beta), (
        'Only one of version and latest_beta can be specified')

    request = context['request']
    app, lang = context['APP'], context['LANG']
    src = src or context.get('src') or request.GET.get('src', '')
    collection = ((collection.uuid if hasattr(collection, 'uuid') else None)
                  or collection
                  or context.get('collection')
                  or request.GET.get('collection')
                  or request.GET.get('collection_id')
                  or request.GET.get('collection_uuid'))
    button = install_button_factory(addon, app, lang, version, show_contrib,
                                    show_warning, src, collection, size,
                                    detailed, impala, latest_beta)
    installed = (request.user.is_authenticated() and
                 addon.id in request.user.mobile_addons)

    c = {'button': button, 'addon': addon, 'version': button.version,
         'installed': installed}
    if impala:
        template = 'addons/impala/button.html'
    elif mobile:
        template = 'addons/mobile/button.html'
    else:
        template = 'addons/button.html'
    t = jingo.render_to_string(request, template, c)
    return jinja2.Markup(t)


@jinja2.contextfunction
def big_install_button(context, addon, **kwargs):
    from olympia.addons.helpers import statusflags
    flags = jinja2.escape(statusflags(context, addon))
    button = install_button(context, addon, detailed=True, size='prominent',
                            **kwargs)
    markup = u'<div class="install-wrapper %s">%s</div>' % (flags, button)
    return jinja2.Markup(markup)


@jinja2.contextfunction
def mobile_install_button(context, addon, **kwargs):
    from olympia.addons.helpers import statusflags
    button = install_button(context, addon, detailed=True, size='prominent',
                            mobile=True, show_contrib=False, **kwargs)
    flags = jinja2.escape(statusflags(context, addon))
    markup = u'<div class="install-wrapper %s">%s</div>' % (flags, button)
    return jinja2.Markup(markup)


def install_button_factory(*args, **kwargs):
    button = InstallButton(*args, **kwargs)
    # Order matters. We want to highlight unreviewed before featured. They
    # should be mutually exclusive, but you never know.
    classes = (('is_persona', PersonaInstallButton),
               ('lite', LiteInstallButton),
               ('unreviewed', UnreviewedInstallButton),
               ('experimental', ExperimentalInstallButton),
               ('featured', FeaturedInstallButton))
    for pred, cls in classes:
        if getattr(button, pred, False):
            button.__class__ = cls
            break
    button.prepare()
    return button


class InstallButton(object):
    button_class = ['download']
    install_class = []
    install_text = ''

    def __init__(self, addon, app, lang, version=None, show_contrib=True,
                 show_warning=True, src='', collection=None, size='',
                 detailed=False, impala=False, latest_beta=False):
        self.addon, self.app, self.lang = addon, app, lang
        self.latest = version is None
        self.version = version
        if not self.version:
            self.version = (addon.current_beta_version if latest_beta
                            else addon.current_version)
        self.src = src
        self.collection = collection
        self.size = size
        self.detailed = detailed
        self.impala = impala

        self.is_beta = self.version and self.version.is_beta
        version_unreviewed = self.version and self.version.is_unreviewed
        self.lite = self.version and self.version.is_lite
        self.experimental = addon.is_experimental
        self.unreviewed = (addon.is_unreviewed() or version_unreviewed or
                           self.is_beta)
        self.featured = (not self.unreviewed and
                         not self.lite and
                         not self.experimental and
                         not self.is_beta and
                         addon.is_featured(app, lang))
        self.is_persona = addon.type == amo.ADDON_PERSONA

        self._show_contrib = show_contrib
        self.show_contrib = (show_contrib and addon.takes_contributions and
                             addon.annoying == amo.CONTRIB_ROADBLOCK)
        self.show_warning = show_warning and self.unreviewed

    def prepare(self):
        """Called after the class is set to manage contributions."""
        # Get a copy for this instance.
        self.button_class = list(self.__class__.button_class)
        self.install_class = list(self.__class__.install_class)

        if self.show_contrib:
            try:
                self.button_class.remove('download')
            except ValueError:
                pass
            self.button_class += ['contrib', 'go']
            self.install_class.append('contrib')

        if self.size:
            self.button_class.append(self.size)
        if self.is_beta:
            self.install_class.append('beta')

    def attrs(self):
        rv = {}
        addon = self.addon
        if (self._show_contrib and addon.takes_contributions and
                addon.annoying == amo.CONTRIB_AFTER):
            rv['data-after'] = 'contrib'
        if addon.type == amo.ADDON_SEARCH:
            rv['data-search'] = 'true'
        return rv

    def links(self):
        if not self.version:
            return []
        rv = []
        files = [f for f in self.version.all_files
                 if f.status in amo.VALID_STATUSES]
        for file in files:
            text, url, os = self.file_details(file)
            rv.append(Link(text, self.fix_link(url), os, file))
        return rv

    def file_details(self, file):
        platform = file.platform
        if self.latest and not self.is_beta and (
                self.addon.status == file.status == amo.STATUS_PUBLIC):
            url = file.latest_xpi_url()
        elif self.latest and self.is_beta and self.addon.show_beta:
            url = file.latest_xpi_url(beta=True)
        else:
            url = file.get_url_path(self.src)

        if platform == amo.PLATFORM_ALL.id:
            text, os = _('Download Now'), None
        else:
            text, os = _('Download'), amo.PLATFORMS[platform]

        if self.show_contrib:
            # L10n: please keep &nbsp; in the string so &rarr; does not wrap.
            text = jinja2.Markup(_('Continue to Download&nbsp;&rarr;'))
            roadblock = reverse('addons.roadblock', args=[self.addon.id])
            url = urlparams(roadblock, version=self.version.version)

        return text, url, os

    def fix_link(self, url):
        if self.src:
            url = urlparams(url, src=self.src)
        if self.collection:
            url = urlparams(url, collection_id=self.collection)
        return url


class FeaturedInstallButton(InstallButton):
    install_class = ['featuredaddon']
    install_text = _lazy(u'Featured')


class UnreviewedInstallButton(InstallButton):
    install_class = ['unreviewed']
    install_text = pgettext_lazy('install_button', u'Not Reviewed')
    button_class = 'download caution'.split()


class LiteInstallButton(InstallButton):
    install_class = ['lite']
    button_class = ['caution']
    install_text = pgettext_lazy('install_button', u'Experimental')


class ExperimentalInstallButton(InstallButton):
    install_class = ['lite']
    button_class = ['caution']
    install_text = pgettext_lazy('install_button', u'Experimental')


class PersonaInstallButton(InstallButton):
    install_class = ['persona']

    def links(self):
        return [Link(_(u'Add to {0}').format(unicode(self.app.pretty)),
                     reverse('addons.detail', args=[amo.PERSONAS_ADDON_ID]))]

    def attrs(self):
        rv = super(PersonaInstallButton, self).attrs()
        rv['data-browsertheme'] = self.addon.persona.json_data
        return rv


class Link(object):

    def __init__(self, text, url, os=None, file=None):
        self.text, self.url, self.os, self.file = text, url, os, file


@non_atomic_requests
def js(request):
    return render(request, 'addons/popups.html',
                  content_type='text/javascript')
Prashant-Surya/addons-server
src/olympia/addons/buttons.py
Python
bsd-3-clause
9,043
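install_button_factory above picks a button subclass by reassigning the instance's __class__ after construction rather than constructing the subclass directly. Below is a standalone toy illustration of that rebinding pattern; the Button classes and predicate are invented for the example and are not part of the addons-server code.

# Toy illustration of the __class__-swap pattern (not olympia code):
# the instance keeps its state, but its behavior switches to the first
# subclass whose predicate matches.
class Button:
    label = 'download'


class CautionButton(Button):
    label = 'caution'


def button_factory(unreviewed):
    button = Button()
    button.unreviewed = unreviewed
    if getattr(button, 'unreviewed', False):
        # Rebinding __class__ preserves instance attributes while
        # swapping in the subclass's methods and class attributes.
        button.__class__ = CautionButton
    return button


print(button_factory(True).label)   # caution
print(button_factory(False).label)  # download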
# Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests the methods defined in skill services.""" from __future__ import annotations import logging from core import feconf from core.constants import constants from core.domain import config_services from core.domain import question_domain from core.domain import skill_domain from core.domain import skill_fetchers from core.domain import skill_services from core.domain import state_domain from core.domain import suggestion_services from core.domain import topic_domain from core.domain import topic_fetchers from core.domain import user_services from core.platform import models from core.tests import test_utils (skill_models, suggestion_models, question_models) = models.Registry.import_models( # pylint: disable=line-too-long [models.NAMES.skill, models.NAMES.suggestion, models.NAMES.question]) class SkillServicesUnitTests(test_utils.GenericTestBase): """Test the skill services module.""" SKILL_ID = None USER_ID = 'user' MISCONCEPTION_ID_1 = 1 MISCONCEPTION_ID_2 = 2 def setUp(self): super(SkillServicesUnitTests, self).setUp() example_1 = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'), state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>') ) skill_contents = skill_domain.SkillContents( state_domain.SubtitledHtml('1', '<p>Explanation</p>'), [example_1], state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': { '1': {}, '2': {}, '3': {} } }), state_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } }) ) misconceptions = [skill_domain.Misconception( self.MISCONCEPTION_ID_1, 'name', '<p>description</p>', '<p>default_feedback</p>', True)] self.num_queries_to_fetch = 10 self.SKILL_ID = skill_services.get_new_skill_id() self.SKILL_ID2 = skill_services.get_new_skill_id() self.SKILL_ID3 = skill_services.get_new_skill_id() self.signup('a@example.com', 'A') self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME) self.signup('admin2@example.com', 'adm2') self.user_id_a = self.get_user_id_from_email('a@example.com') self.user_id_admin = ( self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)) self.user_id_admin_2 = self.get_user_id_from_email('admin2@example.com') self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME, 'adm2']) self.user_a = user_services.get_user_actions_info(self.user_id_a) self.user_admin = user_services.get_user_actions_info( self.user_id_admin) self.user_admin_2 = user_services.get_user_actions_info( self.user_id_admin_2) self.skill = self.save_new_skill( self.SKILL_ID, self.USER_ID, description='Description', misconceptions=misconceptions, skill_contents=skill_contents, prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) def test_apply_change_list_with_invalid_property_name(self): class MockSkillChange: def __init__(self, cmd, property_name): self.cmd = cmd self.property_name = property_name invalid_skill_change_list = [MockSkillChange( 
skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'invalid_property_name')] with self.assertRaisesRegex(Exception, 'Invalid change dict.'): skill_services.apply_change_list( self.SKILL_ID, invalid_skill_change_list, self.user_id_a) def test_compute_summary(self): skill_summary = skill_services.compute_summary_of_skill(self.skill) self.assertEqual(skill_summary.id, self.SKILL_ID) self.assertEqual(skill_summary.description, 'Description') self.assertEqual(skill_summary.misconception_count, 1) self.assertEqual(skill_summary.worked_examples_count, 1) def test_get_image_filenames_from_skill(self): explanation_html = ( 'Explanation with image: <oppia-noninteractive-image ' 'filepath-with-value="&quot;img.svg&quot;" caption-with-value=' '"&quot;&quot;" alt-with-value="&quot;Image&quot;">' '</oppia-noninteractive-image>' ) example_explanation_html = ( 'Explanation with image: <oppia-noninteractive-image ' 'filepath-with-value="&quot;img2.svg&quot;" caption-with-value=' '"&quot;&quot;" alt-with-value="&quot;Image&quot;">' '</oppia-noninteractive-image>' ) example_1 = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'), state_domain.SubtitledHtml('3', example_explanation_html) ) self.skill.skill_contents = skill_domain.SkillContents( state_domain.SubtitledHtml('1', explanation_html), [example_1], state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': { '1': {}, '2': {}, '3': {} } }), state_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } }) ) filenames = skill_services.get_image_filenames_from_skill(self.skill) self.assertItemsEqual(filenames, ['img.svg', 'img2.svg']) def test_get_new_skill_id(self): new_skill_id = skill_services.get_new_skill_id() self.assertEqual(len(new_skill_id), 12) self.assertEqual(skill_models.SkillModel.get_by_id(new_skill_id), None) def test_get_descriptions_of_skills(self): example_1 = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'), state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>') ) self.save_new_skill( 'skill_id_1', self.user_id_admin, description='Description 1', misconceptions=[], skill_contents=skill_domain.SkillContents( state_domain.SubtitledHtml('1', '<p>Explanation</p>'), [example_1], state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': { '1': {}, '2': {}, '3': {} } }), state_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } }) ) ) self.save_new_skill( 'skill_id_2', self.user_id_admin, description='Description 2', misconceptions=[], skill_contents=skill_domain.SkillContents( state_domain.SubtitledHtml('1', '<p>Explanation</p>'), [example_1], state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': { '1': {}, '2': {}, '3': {} } }), state_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } }) ) ) skill_services.delete_skill(self.user_id_admin, 'skill_id_2') skill_descriptions, deleted_skill_ids = ( skill_services.get_descriptions_of_skills( ['skill_id_1', 'skill_id_2'])) self.assertEqual(deleted_skill_ids, ['skill_id_2']) self.assertEqual( skill_descriptions, { 'skill_id_1': 'Description 1', 'skill_id_2': None } ) def test_get_rubrics_of_linked_skills(self): example_1 = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'), state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>') ) self.save_new_skill( 'skill_id_1', self.user_id_admin, description='Description 1', 
misconceptions=[], skill_contents=skill_domain.SkillContents( state_domain.SubtitledHtml('1', '<p>Explanation</p>'), [example_1], state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': { '1': {}, '2': {}, '3': {} } }), state_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } }) ) ) self.save_new_skill( 'skill_id_2', self.user_id_admin, description='Description 2', misconceptions=[], skill_contents=skill_domain.SkillContents( state_domain.SubtitledHtml('1', '<p>Explanation</p>'), [example_1], state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': { '1': {}, '2': {}, '3': {} } }), state_domain.WrittenTranslations.from_dict({ 'translations_mapping': { '1': {}, '2': {}, '3': {} } }) ) ) skill_services.delete_skill(self.user_id_admin, 'skill_id_2') skill_rubrics, deleted_skill_ids = ( skill_services.get_rubrics_of_skills( ['skill_id_1', 'skill_id_2'])) self.assertEqual(deleted_skill_ids, ['skill_id_2']) self.assertEqual( skill_rubrics, { 'skill_id_1': [ skill_domain.Rubric( constants.SKILL_DIFFICULTIES[0], ['Explanation 1'] ).to_dict(), skill_domain.Rubric( constants.SKILL_DIFFICULTIES[1], ['Explanation 2'] ).to_dict(), skill_domain.Rubric( constants.SKILL_DIFFICULTIES[2], ['Explanation 3'] ).to_dict()], 'skill_id_2': None } ) def test_get_skill_from_model(self): skill_model = skill_models.SkillModel.get(self.SKILL_ID) skill = skill_fetchers.get_skill_from_model(skill_model) self.assertEqual(skill.to_dict(), self.skill.to_dict()) def test_get_skill_summary_from_model(self): skill_summary_model = skill_models.SkillSummaryModel.get(self.SKILL_ID) skill_summary = skill_services.get_skill_summary_from_model( skill_summary_model) self.assertEqual(skill_summary.id, self.SKILL_ID) self.assertEqual(skill_summary.description, 'Description') self.assertEqual(skill_summary.misconception_count, 1) self.assertEqual(skill_summary.worked_examples_count, 1) def test_get_all_skill_summaries(self): skill_summaries = skill_services.get_all_skill_summaries() self.assertEqual(len(skill_summaries), 1) self.assertEqual(skill_summaries[0].id, self.SKILL_ID) self.assertEqual(skill_summaries[0].description, 'Description') self.assertEqual(skill_summaries[0].misconception_count, 1) self.assertEqual(skill_summaries[0].worked_examples_count, 1) def test_commit_log_entry(self): skill_commit_log_entry = ( skill_models.SkillCommitLogEntryModel.get_commit(self.SKILL_ID, 1) ) self.assertEqual(skill_commit_log_entry.commit_type, 'create') self.assertEqual(skill_commit_log_entry.skill_id, self.SKILL_ID) self.assertEqual(skill_commit_log_entry.user_id, self.USER_ID) def test_get_skill_summary_by_id(self): skill_summary = skill_services.get_skill_summary_by_id(self.SKILL_ID) self.assertEqual(skill_summary.id, self.SKILL_ID) self.assertEqual(skill_summary.description, 'Description') self.assertEqual(skill_summary.misconception_count, 1) def test_get_filtered_skill_summaries(self): self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, None, None, None)) self.assertEqual(next_cursor, None) self.assertFalse(more) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2) self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID) augmented_skill_summaries, next_cursor, more = ( 
skill_services.get_filtered_skill_summaries( 1, None, 'english', None, None, None)) self.assertEqual(len(augmented_skill_summaries), 0) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, None, 'Oldest Created', None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID) self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID2) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, None, 'Most Recently Updated', None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2) self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, None, 'Least Recently Updated', None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID) self.assertEqual(augmented_skill_summaries[1].id, self.SKILL_ID2) def test_cursor_behaves_correctly_when_fetching_skills_in_batches(self): self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=[]) self.save_new_skill( self.SKILL_ID3, self.USER_ID, description='Description3', prerequisite_skill_ids=[]) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( 1, None, None, None, None, None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertIsInstance(next_cursor, str) self.assertTrue(more) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, None, None, next_cursor)) self.assertEqual(len(augmented_skill_summaries), 1) self.assertIsNone(next_cursor) self.assertFalse(more) def test_filter_skills_by_status_all(self): self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, None, None, None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(next_cursor, None) self.assertFalse(more) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, 'All', None, None, None, None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(next_cursor, None) self.assertFalse(more) def test_filter_skills_by_status_assigned(self): self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, 'Assigned', None, None, None, None)) self.assertEqual(len(augmented_skill_summaries), 0) self.assertEqual(next_cursor, None) self.assertFalse(more) topic_id = topic_fetchers.get_new_topic_id() self.save_new_topic( topic_id, self.USER_ID, name='topic1', abbreviated_name='topic-one', url_fragment='topic-one', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[self.SKILL_ID2], subtopics=[], next_subtopic_id=1) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( 
self.num_queries_to_fetch, 'Assigned', None, None, None, None)) self.assertEqual(augmented_skill_summaries[0].topic_names, ['topic1']) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2) self.assertEqual(next_cursor, None) self.assertFalse(more) def test_filter_skills_by_status_unassigned(self): self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, 'Unassigned', None, None, None, None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(next_cursor, None) self.assertFalse(more) def test_filter_skills_by_classroom_name(self): augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, 'english', None, None, None)) self.assertEqual(len(augmented_skill_summaries), 0) self.assertEqual(next_cursor, None) self.assertFalse(more) self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Description2', prerequisite_skill_ids=['skill_id_1', 'skill_id_2']) topic_id = topic_fetchers.get_new_topic_id() self.save_new_topic( topic_id, self.USER_ID, name='topic1', abbreviated_name='topic-two', url_fragment='topic-two', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[self.SKILL_ID2], subtopics=[], next_subtopic_id=1) config_services.set_property( self.user_id_admin, 'classroom_pages_data', [{ 'url_fragment': 'math', 'name': 'math', 'topic_ids': [topic_id], 'topic_list_intro': 'Topics Covered', 'course_details': 'Course Details' }] ) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, 'math', None, None, None)) self.assertEqual(augmented_skill_summaries[0].topic_names, ['topic1']) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2) self.assertEqual( augmented_skill_summaries[0].classroom_names, ['math']) self.assertEqual(next_cursor, None) self.assertFalse(more) def test_filter_skills_by_keywords(self): self.save_new_skill( self.SKILL_ID2, self.USER_ID, description='Alpha', misconceptions=None, skill_contents=None, prerequisite_skill_ids=[]) self.save_new_skill( self.SKILL_ID3, self.USER_ID, description='Beta', misconceptions=None, skill_contents=None, prerequisite_skill_ids=[]) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, None, None, None)) self.assertEqual(len(augmented_skill_summaries), 3) self.assertEqual(next_cursor, None) self.assertFalse(more) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( 1, None, None, ['Non_existent'], 'Least Recently Updated', None)) self.assertEqual(len(augmented_skill_summaries), 0) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, [], None, None)) self.assertEqual(len(augmented_skill_summaries), 3) self.assertEqual(next_cursor, None) self.assertFalse(more) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, ['descr'], None, None)) self.assertEqual(len(augmented_skill_summaries), 1) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID) self.assertEqual(next_cursor, None) self.assertFalse(more) augmented_skill_summaries, next_cursor, 
more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, ['alph'], None, None)) self.assertEqual(len(augmented_skill_summaries), 1) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID2) self.assertEqual(next_cursor, None) self.assertFalse(more) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, ['bet'], None, None)) self.assertEqual(len(augmented_skill_summaries), 1) self.assertEqual(augmented_skill_summaries[0].id, self.SKILL_ID3) self.assertEqual(next_cursor, None) self.assertFalse(more) augmented_skill_summaries, next_cursor, more = ( skill_services.get_filtered_skill_summaries( self.num_queries_to_fetch, None, None, ['alp', 'bet'], None, None)) self.assertEqual(len(augmented_skill_summaries), 2) self.assertEqual(next_cursor, None) self.assertFalse(more) def test_get_all_topic_assignments_for_skill(self): topic_id = topic_fetchers.get_new_topic_id() topic_id_1 = topic_fetchers.get_new_topic_id() self.save_new_topic( topic_id, self.USER_ID, name='Topic1', abbreviated_name='topic-three', url_fragment='topic-three', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[self.SKILL_ID], subtopics=[], next_subtopic_id=1) subtopic = topic_domain.Subtopic.from_dict({ 'id': 1, 'title': 'subtopic1', 'skill_ids': [self.SKILL_ID], 'thumbnail_filename': None, 'thumbnail_bg_color': None, 'thumbnail_size_in_bytes': None, 'url_fragment': 'subtopic-one' }) self.save_new_topic( topic_id_1, self.USER_ID, name='Topic2', abbreviated_name='topic-four', url_fragment='topic-four', description='Description2', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[subtopic], next_subtopic_id=2) topic_assignments = ( skill_services.get_all_topic_assignments_for_skill(self.SKILL_ID)) topic_assignments = sorted( topic_assignments, key=lambda i: i.topic_name) self.assertEqual(len(topic_assignments), 2) self.assertEqual(topic_assignments[0].topic_name, 'Topic1') self.assertEqual(topic_assignments[0].topic_id, topic_id) self.assertEqual(topic_assignments[0].topic_version, 1) self.assertIsNone(topic_assignments[0].subtopic_id) self.assertEqual(topic_assignments[1].topic_name, 'Topic2') self.assertEqual(topic_assignments[1].topic_id, topic_id_1) self.assertEqual(topic_assignments[1].topic_version, 1) self.assertEqual(topic_assignments[1].subtopic_id, 1) def test_remove_skill_from_all_topics(self): topic_id = topic_fetchers.get_new_topic_id() topic_id_1 = topic_fetchers.get_new_topic_id() self.save_new_topic( topic_id, self.USER_ID, name='Topic1', abbreviated_name='topic-five', url_fragment='topic-five', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[self.SKILL_ID], subtopics=[], next_subtopic_id=1) subtopic = topic_domain.Subtopic.from_dict({ 'id': 1, 'title': 'subtopic1', 'skill_ids': [self.SKILL_ID], 'thumbnail_filename': None, 'thumbnail_bg_color': None, 'thumbnail_size_in_bytes': None, 'url_fragment': 'subtopic-one' }) self.save_new_topic( topic_id_1, self.USER_ID, name='Topic2', abbreviated_name='topic-six', url_fragment='topic-six', description='Description2', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[subtopic], next_subtopic_id=2) skill_services.remove_skill_from_all_topics(self.USER_ID, self.SKILL_ID) topic_assignments_dict = ( skill_services.get_all_topic_assignments_for_skill(self.SKILL_ID)) 
self.assertEqual(len(topic_assignments_dict), 0) def test_successfully_replace_skill_id_in_all_topics(self): topic_id = topic_fetchers.get_new_topic_id() topic_id_1 = topic_fetchers.get_new_topic_id() self.save_new_topic( topic_id, self.USER_ID, name='Topic1', abbreviated_name='topic-five', url_fragment='topic-five', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[self.SKILL_ID], subtopics=[], next_subtopic_id=1) subtopic = topic_domain.Subtopic.from_dict({ 'id': 1, 'title': 'subtopic1', 'skill_ids': [self.SKILL_ID], 'thumbnail_filename': None, 'thumbnail_bg_color': None, 'thumbnail_size_in_bytes': None, 'url_fragment': 'subtopic-one' }) self.save_new_topic( topic_id_1, self.USER_ID, name='Topic2', abbreviated_name='topic-six', url_fragment='topic-six', description='Description2', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[], subtopics=[subtopic], next_subtopic_id=2) topic_assignments_dict = ( skill_services.get_all_topic_assignments_for_skill('new_skill_id')) self.assertEqual(len(topic_assignments_dict), 0) skill_services.replace_skill_id_in_all_topics( self.USER_ID, self.SKILL_ID, 'new_skill_id') topic_assignments_dict = ( skill_services.get_all_topic_assignments_for_skill('new_skill_id')) self.assertEqual(len(topic_assignments_dict), 2) def test_failure_replace_skill_id_in_all_topics(self): topic_id = topic_fetchers.get_new_topic_id() self.save_new_topic( topic_id, self.USER_ID, name='Topic1', abbreviated_name='topic-five', url_fragment='topic-five', description='Description', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[self.SKILL_ID, 'new_skill_id'], subtopics=[], next_subtopic_id=1) error_message = ( 'Found topic \'Topic1\' contains the two skills to be merged. 
' 'Please unassign one of these skills from topic ' 'and retry this operation.') with self.assertRaisesRegex(Exception, error_message): skill_services.replace_skill_id_in_all_topics( self.USER_ID, self.SKILL_ID, 'new_skill_id') def test_update_skill(self): changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_ADD_SKILL_MISCONCEPTION, 'new_misconception_dict': { 'id': self.skill.next_misconception_id, 'name': 'test name', 'notes': '<p>test notes</p>', 'feedback': '<p>test feedback</p>', 'must_be_addressed': True } }), skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NAME), 'misconception_id': self.skill.next_misconception_id, 'old_value': 'test name', 'new_value': 'Name' }), skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED ), 'misconception_id': self.skill.next_misconception_id, 'old_value': True, 'new_value': False }), skill_domain.SkillChange({ 'cmd': skill_domain.CMD_ADD_PREREQUISITE_SKILL, 'skill_id': 'skill_id_3' }), skill_domain.SkillChange({ 'cmd': skill_domain.CMD_DELETE_PREREQUISITE_SKILL, 'skill_id': 'skill_id_1' }), skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_RUBRICS, 'difficulty': constants.SKILL_DIFFICULTIES[0], 'explanations': [ '<p>New Explanation 1</p>', '<p>New Explanation 2</p>'] }), skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_RUBRICS, 'difficulty': constants.SKILL_DIFFICULTIES[1], 'explanations': ['<p>Explanation</p>'] }) ] skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Updated misconception name.') skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) skill_summary = skill_services.get_skill_summary_by_id(self.SKILL_ID) self.assertEqual(skill_summary.misconception_count, 2) self.assertEqual(skill_summary.version, 2) self.assertEqual(skill.version, 2) self.assertEqual( skill.prerequisite_skill_ids, ['skill_id_2', 'skill_id_3']) self.assertEqual(skill.misconceptions[1].name, 'Name') self.assertEqual(skill.misconceptions[1].must_be_addressed, False) self.assertEqual( skill.rubrics[0].explanations, [ '<p>New Explanation 1</p>', '<p>New Explanation 2</p>']) self.assertEqual(skill.rubrics[1].explanations, ['<p>Explanation</p>']) def test_merge_skill(self): changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, 'property_name': ( skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID), 'old_value': '', 'new_value': 'TestSkillId' }), skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, 'property_name': ( skill_domain.SKILL_PROPERTY_ALL_QUESTIONS_MERGED), 'old_value': None, 'new_value': False }) ] skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Merging skill.') skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(skill.version, 2) self.assertEqual(skill.superseding_skill_id, 'TestSkillId') self.assertEqual(skill.all_questions_merged, False) def test_set_merge_complete_for_skill(self): changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, 'property_name': ( skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID), 'old_value': None, 'new_value': self.SKILL_ID }), skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, 'property_name': ( skill_domain.SKILL_PROPERTY_ALL_QUESTIONS_MERGED), 'old_value': False, 'new_value': True }) ] skill_services.update_skill( 
self.USER_ID, self.SKILL_ID, changelist, 'Setting merge complete for skill.') skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(skill.version, 2) self.assertEqual(skill.all_questions_merged, True) def test_get_merged_skill_ids(self): skill_ids = skill_services.get_merged_skill_ids() self.assertEqual(len(skill_ids), 0) changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, 'property_name': ( skill_domain.SKILL_PROPERTY_SUPERSEDING_SKILL_ID), 'old_value': '', 'new_value': 'TestSkillId' }) ] skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Merging skill.') skill_ids = skill_services.get_merged_skill_ids() self.assertEqual(len(skill_ids), 1) self.assertEqual(skill_ids[0], self.SKILL_ID) def test_delete_skill(self): skill_services.delete_skill(self.USER_ID, self.SKILL_ID) self.assertEqual( skill_fetchers.get_skill_by_id(self.SKILL_ID, strict=False), None) self.assertEqual( skill_services.get_skill_summary_by_id( self.SKILL_ID, strict=False), None) def test_delete_skill_marked_deleted(self): skill_models.SkillModel.delete_multi( [self.SKILL_ID], self.USER_ID, '', force_deletion=False) skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID) self.assertTrue(skill_model.deleted) skill_services.delete_skill( self.USER_ID, self.SKILL_ID, force_deletion=True) skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID) self.assertEqual(skill_model, None) self.assertEqual( skill_services.get_skill_summary_by_id( self.SKILL_ID, strict=False), None) def test_delete_skill_model_with_deleted_summary_model(self): skill_summary_model = ( skill_models.SkillSummaryModel.get(self.SKILL_ID)) skill_summary_model.delete() skill_summary_model = ( skill_models.SkillSummaryModel.get(self.SKILL_ID, False)) self.assertIsNone(skill_summary_model) skill_services.delete_skill( self.USER_ID, self.SKILL_ID, force_deletion=True) skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID) self.assertEqual(skill_model, None) self.assertEqual( skill_services.get_skill_summary_by_id( self.SKILL_ID, strict=False), None) def test_delete_skill_model_with_linked_suggestion(self): suggestion_change = { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': { 'question_state_data': self._create_valid_question_data( 'default_state').to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_1'], 'inapplicable_skill_misconception_ids': ['skillid12345-1'] }, 'skill_id': self.SKILL_ID, 'skill_difficulty': 0.3 } suggestion = suggestion_services.create_suggestion( feconf.SUGGESTION_TYPE_ADD_QUESTION, feconf.ENTITY_TYPE_SKILL, self.SKILL_ID, 1, self.user_id_a, suggestion_change, 'test description' ) skill_services.delete_skill( self.user_id_a, self.SKILL_ID, force_deletion=True) skill_model = skill_models.SkillModel.get_by_id(self.SKILL_ID) self.assertEqual(skill_model, None) with self.assertRaisesRegex( Exception, 'The suggestion with id %s has already been accepted/' 'rejected.' 
% suggestion.suggestion_id): suggestion_services.auto_reject_question_suggestions_for_skill_id( self.SKILL_ID) def test_cannot_update_skill_with_no_commit_message(self): changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, 'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE, 'old_value': 'en', 'new_value': 'bn' }) ] with self.assertRaisesRegex( Exception, 'Expected a commit message, received none.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, '') def test_cannot_update_skill_with_empty_changelist(self): with self.assertRaisesRegex( Exception, 'Unexpected error: received an invalid change list when trying to ' 'save skill'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, [], 'No changes made.') def test_mismatch_of_skill_versions(self): changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, 'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE, 'old_value': 'en', 'new_value': 'bn' }) ] skill_model = skill_models.SkillModel.get(self.SKILL_ID) skill_model.version = 0 with self.assertRaisesRegex( Exception, 'Unexpected error: trying to update version 0 of skill ' 'from version 1. Please reload the page and try again.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Change language code.') skill_model.version = 2 with self.assertRaisesRegex( Exception, 'Trying to update version 2 of skill from version 1, which is too ' 'old. Please reload the page and try again.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Change language code.') def test_normal_user_cannot_update_skill_property(self): changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, 'property_name': skill_domain.SKILL_PROPERTY_DESCRIPTION, 'old_value': 'Description', 'new_value': 'New description' }) ] with self.assertRaisesRegex( Exception, 'The user does not have enough rights to edit the ' 'skill description.'): skill_services.update_skill( self.user_id_a, self.SKILL_ID, changelist, 'Change description.') def test_update_skill_property(self): skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) old_description = 'Description' new_description = 'New description' self.assertEqual( skill.description, old_description) changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY, 'property_name': skill_domain.SKILL_PROPERTY_DESCRIPTION, 'old_value': old_description, 'new_value': new_description }) ] skill_services.update_skill( self.user_id_admin, self.SKILL_ID, changelist, 'Change description.' 
) skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual( skill.description, new_description) def test_update_skill_explanation(self): skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) old_explanation = {'content_id': '1', 'html': '<p>Explanation</p>'} new_explanation = {'content_id': '1', 'html': '<p>New explanation</p>'} self.assertEqual( skill.skill_contents.explanation.to_dict(), old_explanation) changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_CONTENTS_PROPERTY, 'property_name': ( skill_domain.SKILL_CONTENTS_PROPERTY_EXPLANATION), 'old_value': old_explanation, 'new_value': new_explanation }) ] skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Change explanation.') skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual( skill.skill_contents.explanation.to_dict(), new_explanation) def test_update_skill_worked_examples(self): skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) old_worked_example = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '<p>Example Question 1</p>'), state_domain.SubtitledHtml('3', '<p>Example Explanation 1</p>') ).to_dict() new_worked_example = skill_domain.WorkedExample( state_domain.SubtitledHtml('2', '<p>Example Question 1 new</p>'), state_domain.SubtitledHtml('3', '<p>Example Explanation 1 new</p>') ).to_dict() self.assertEqual(len(skill.skill_contents.worked_examples), 1) self.assertEqual( skill.skill_contents.worked_examples[0].to_dict(), old_worked_example) changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_CONTENTS_PROPERTY, 'property_name': ( skill_domain.SKILL_CONTENTS_PROPERTY_WORKED_EXAMPLES), 'old_value': [old_worked_example], 'new_value': [new_worked_example] }) ] skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Change worked examples.') skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(len(skill.skill_contents.worked_examples), 1) self.assertEqual( skill.skill_contents.worked_examples[0].to_dict(), new_worked_example) def test_delete_skill_misconception(self): skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(len(skill.misconceptions), 1) self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1) changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_DELETE_SKILL_MISCONCEPTION, 'misconception_id': self.MISCONCEPTION_ID_1, }) ] skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Delete misconception.') skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(skill.misconceptions, []) def test_does_skill_with_description_exist(self): self.assertEqual( skill_services.does_skill_with_description_exist('Description'), True ) self.assertEqual( skill_services.does_skill_with_description_exist('Does not exist'), False ) def test_update_skill_misconception_notes(self): skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(len(skill.misconceptions), 1) self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1) self.assertEqual(skill.misconceptions[0].notes, '<p>description</p>') changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NOTES), 'misconception_id': self.MISCONCEPTION_ID_1, 'old_value': '<p>description</p>', 'new_value': '<p>new description</p>' }) ] skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Update misconception notes.') skill = 
skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(len(skill.misconceptions), 1) self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1) self.assertEqual( skill.misconceptions[0].notes, '<p>new description</p>') def test_update_skill_misconception_feedback(self): skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(len(skill.misconceptions), 1) self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1) self.assertEqual( skill.misconceptions[0].feedback, '<p>default_feedback</p>') changelist = [ skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK), 'misconception_id': self.MISCONCEPTION_ID_1, 'old_value': '<p>default_feedback</p>', 'new_value': '<p>new feedback</p>' }) ] skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Update misconception feedback.') skill = skill_fetchers.get_skill_by_id(self.SKILL_ID) self.assertEqual(len(skill.misconceptions), 1) self.assertEqual(skill.misconceptions[0].id, self.MISCONCEPTION_ID_1) self.assertEqual( skill.misconceptions[0].feedback, '<p>new feedback</p>') def test_skill_has_associated_questions(self): skill_id_1 = skill_services.get_new_skill_id() # type: ignore[no-untyped-call] self.save_new_skill(skill_id_1, 'user', description='Description 1') # type: ignore[no-untyped-call] # Testing that no question is linked to a skill. self.assertEqual( skill_services.skill_has_associated_questions(skill_id_1), False ) questionskilllink_model1 = ( question_models.QuestionSkillLinkModel.create( 'question_id1', skill_id_1, 0.1) ) questionskilllink_model2 = ( question_models.QuestionSkillLinkModel.create( 'question_id2', skill_id_1, 0.2) ) question_models.QuestionSkillLinkModel.put_multi_question_skill_links( [questionskilllink_model1, questionskilllink_model2] ) self.assertEqual( skill_services.skill_has_associated_questions(skill_id_1), True ) def test_update_skill_schema(self): orig_skill_dict = ( skill_fetchers.get_skill_by_id(self.SKILL_ID).to_dict()) changelist = [ skill_domain.SkillChange({ 'cmd': ( skill_domain.CMD_MIGRATE_RUBRICS_SCHEMA_TO_LATEST_VERSION), 'from_version': 1, 'to_version': 2, }) ] skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Update schema.') new_skill_dict = skill_fetchers.get_skill_by_id(self.SKILL_ID).to_dict() # Check version is updated. self.assertEqual(new_skill_dict['version'], 2) # Delete version and check that the two dicts are the same. 
del orig_skill_dict['version'] del new_skill_dict['version'] self.assertEqual(orig_skill_dict, new_skill_dict) def test_cannot_update_skill_with_invalid_change_list(self): observed_log_messages = [] def _mock_logging_function(msg, *args): """Mocks logging.error().""" observed_log_messages.append(msg % args) logging_swap = self.swap(logging, 'error', _mock_logging_function) assert_raises_context_manager = self.assertRaisesRegex( Exception, '\'str\' object has no attribute \'cmd\'') with logging_swap, assert_raises_context_manager: skill_services.update_skill( self.USER_ID, self.SKILL_ID, 'invalid_change_list', 'commit message') self.assertEqual(len(observed_log_messages), 1) self.assertRegex( observed_log_messages[0], 'object has no' ' attribute \'cmd\' %s invalid_change_list' % self.SKILL_ID) def test_cannot_update_misconception_name_with_invalid_id(self): changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NAME), 'misconception_id': 'invalid_id', 'old_value': 'test name', 'new_value': 'Name' })] with self.assertRaisesRegex( Exception, 'There is no misconception with the given id.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Updated misconception name.') def test_cannot_update_misconception_must_be_addressed_with_invalid_id( self): changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_MUST_BE_ADDRESSED), 'misconception_id': 'invalid_id', 'old_value': False, 'new_value': True })] with self.assertRaisesRegex( Exception, 'There is no misconception with the given id.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Updated misconception must_be_addressed.') def test_cannot_add_already_existing_prerequisite_skill(self): changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_ADD_PREREQUISITE_SKILL, 'skill_id': 'skill_id_1' })] with self.assertRaisesRegex( Exception, 'The skill is already a prerequisite skill.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Added prereq skill.') def test_cannot_delete_non_existent_prerequisite_skill(self): changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_DELETE_PREREQUISITE_SKILL, 'skill_id': 'skill_id_5' })] with self.assertRaisesRegex( Exception, 'The skill to remove is not a prerequisite skill.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Removed prereq skill.') def test_cannot_add_rubric_with_invalid_difficulty(self): changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_RUBRICS, 'difficulty': 'invalid_difficulty', 'explanations': ['<p>Explanation</p>'] })] with self.assertRaisesRegex( Exception, 'There is no rubric for the given difficulty.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Added rubric.') def test_cannot_delete_misconception_with_invalid_id(self): changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_DELETE_SKILL_MISCONCEPTION, 'misconception_id': 'invalid_id' })] with self.assertRaisesRegex( Exception, 'There is no misconception with the given id.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Delete misconception') def test_cannot_update_misconception_notes_with_invalid_id(self): changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( 
skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_NOTES), 'misconception_id': 'invalid_id', 'old_value': 'description', 'new_value': 'new description' })] with self.assertRaisesRegex( Exception, 'There is no misconception with the given id.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Updated misconception notes.') def test_cannot_update_misconception_feedback_with_invalid_id(self): changelist = [skill_domain.SkillChange({ 'cmd': skill_domain.CMD_UPDATE_SKILL_MISCONCEPTIONS_PROPERTY, 'property_name': ( skill_domain.SKILL_MISCONCEPTIONS_PROPERTY_FEEDBACK), 'misconception_id': 'invalid_id', 'old_value': 'default_feedback', 'new_value': 'new feedback' })] with self.assertRaisesRegex( Exception, 'There is no misconception with the given id.'): skill_services.update_skill( self.USER_ID, self.SKILL_ID, changelist, 'Updated misconception feedback.') class SkillMasteryServicesUnitTests(test_utils.GenericTestBase): """Test the skill mastery services module.""" SKILL_IDS = [] USER_ID = 'user' DEGREE_OF_MASTERY_1 = 0.0 DEGREE_OF_MASTERY_2 = 0.5 def setUp(self): super(SkillMasteryServicesUnitTests, self).setUp() self.SKILL_ID_1 = skill_services.get_new_skill_id() self.SKILL_ID_2 = skill_services.get_new_skill_id() self.SKILL_ID_3 = skill_services.get_new_skill_id() self.SKILL_IDS = [self.SKILL_ID_1, self.SKILL_ID_2, self.SKILL_ID_3] skill_services.create_user_skill_mastery( self.USER_ID, self.SKILL_ID_1, self.DEGREE_OF_MASTERY_1) skill_services.create_user_skill_mastery( self.USER_ID, self.SKILL_ID_2, self.DEGREE_OF_MASTERY_2) def test_get_user_skill_mastery(self): degree_of_mastery = skill_services.get_user_skill_mastery( self.USER_ID, self.SKILL_ID_1) self.assertEqual(degree_of_mastery, self.DEGREE_OF_MASTERY_1) degree_of_mastery = skill_services.get_user_skill_mastery( self.USER_ID, self.SKILL_ID_3) self.assertEqual(degree_of_mastery, None) def test_get_multi_user_skill_mastery(self): degree_of_mastery = skill_services.get_multi_user_skill_mastery( self.USER_ID, self.SKILL_IDS) self.assertEqual( degree_of_mastery, { self.SKILL_ID_1: self.DEGREE_OF_MASTERY_1, self.SKILL_ID_2: self.DEGREE_OF_MASTERY_2, self.SKILL_ID_3: None }) def test_create_multi_user_skill_mastery(self): skill_id_4 = skill_services.get_new_skill_id() skill_id_5 = skill_services.get_new_skill_id() skill_services.create_multi_user_skill_mastery( self.USER_ID, {skill_id_4: 0.3, skill_id_5: 0.5}) degrees_of_mastery = skill_services.get_multi_user_skill_mastery( self.USER_ID, [skill_id_4, skill_id_5]) self.assertEqual( degrees_of_mastery, {skill_id_4: 0.3, skill_id_5: 0.5}) def test_get_sorted_skill_ids(self): degrees_of_masteries = skill_services.get_multi_user_skill_mastery( self.USER_ID, self.SKILL_IDS) with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 2): sorted_skill_ids = skill_services.get_sorted_skill_ids( degrees_of_masteries) expected_sorted_skill_ids = [self.SKILL_ID_3, self.SKILL_ID_1] self.assertEqual(len(sorted_skill_ids), 2) self.assertEqual(sorted_skill_ids, expected_sorted_skill_ids) with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 3): sorted_skill_ids = skill_services.get_sorted_skill_ids( degrees_of_masteries) expected_sorted_skill_ids = [ self.SKILL_ID_3, self.SKILL_ID_1, self.SKILL_ID_2] self.assertEqual(sorted_skill_ids, expected_sorted_skill_ids) def test_filter_skills_by_mastery(self): with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', 2): arranged_filtered_skill_ids = ( skill_services.filter_skills_by_mastery( self.USER_ID, self.SKILL_IDS)) 
self.assertEqual(len(arranged_filtered_skill_ids), 2) expected_skill_ids = [self.SKILL_ID_1, self.SKILL_ID_3] self.assertEqual(arranged_filtered_skill_ids, expected_skill_ids) with self.swap(feconf, 'MAX_NUMBER_OF_SKILL_IDS', len(self.SKILL_IDS)): arranged_filtered_skill_ids = ( skill_services.filter_skills_by_mastery( self.USER_ID, self.SKILL_IDS)) self.assertEqual(arranged_filtered_skill_ids, self.SKILL_IDS) class SkillMigrationTests(test_utils.GenericTestBase): def test_migrate_skill_contents_to_latest_schema(self): commit_cmd = skill_domain.SkillChange({ 'cmd': skill_domain.CMD_CREATE_NEW }) explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID html_content = ( '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a' 'mp;quot;+,-,-,+&amp;quot;"></oppia-noninteractive-math>') expected_html_content = ( '<p>Value</p><oppia-noninteractive-math math_content-with-value=' '"{&amp;quot;raw_latex&amp;quot;: &amp;quot;+,-,-,+&amp;quot;, &' 'amp;quot;svg_filename&amp;quot;: &amp;quot;&amp;quot;}"></oppia' '-noninteractive-math>') written_translations_dict = { 'translations_mapping': { 'content1': { 'en': { 'data_format': 'html', 'translation': '', 'needs_update': True }, 'hi': { 'data_format': 'html', 'translation': 'Hey!', 'needs_update': False } } } } written_translations_dict_math = { 'translations_mapping': { 'content1': { 'en': { 'data_format': 'html', 'translation': expected_html_content, 'needs_update': True }, 'hi': { 'data_format': 'html', 'translation': 'Hey!', 'needs_update': False } } } } worked_example_dict = { 'question': { 'content_id': 'question1', 'html': '' }, 'explanation': { 'content_id': 'explanation1', 'html': '' } } worked_example_dict_math = { 'question': { 'content_id': 'question1', 'html': expected_html_content }, 'explanation': { 'content_id': 'explanation1', 'html': expected_html_content } } skill_contents = skill_domain.SkillContents( state_domain.SubtitledHtml( explanation_content_id, ''), [skill_domain.WorkedExample.from_dict(worked_example_dict)], state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': { explanation_content_id: {} } }), state_domain.WrittenTranslations.from_dict( written_translations_dict)) skill_contents_dict = skill_contents.to_dict() skill_contents_dict['explanation']['html'] = html_content skill_contents_dict['written_translations']['translations_mapping'][ 'content1']['en']['translation'] = html_content skill_contents_dict['worked_examples'][0]['question']['html'] = ( html_content) skill_contents_dict['worked_examples'][0]['explanation']['html'] = ( html_content) model = skill_models.SkillModel( id='skill_id', description='description', language_code='en', misconceptions=[], rubrics=[], skill_contents=skill_contents_dict, next_misconception_id=1, misconceptions_schema_version=1, rubric_schema_version=1, skill_contents_schema_version=1, all_questions_merged=False ) commit_cmd_dicts = [commit_cmd.to_dict()] model.commit( 'user_id_admin', 'skill model created', commit_cmd_dicts) current_schema_version_swap = self.swap( feconf, 'CURRENT_SKILL_CONTENTS_SCHEMA_VERSION', 4) with current_schema_version_swap: skill = skill_fetchers.get_skill_from_model(model) self.assertEqual(skill.skill_contents_schema_version, 4) self.assertEqual( skill.skill_contents.explanation.html, expected_html_content) self.assertEqual( skill.skill_contents.written_translations.to_dict(), written_translations_dict_math) self.assertEqual( skill.skill_contents.worked_examples[0].to_dict(), worked_example_dict_math) def 
test_migrate_misconceptions_to_latest_schema(self): commit_cmd = skill_domain.SkillChange({ 'cmd': skill_domain.CMD_CREATE_NEW }) explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID html_content = ( '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a' 'mp;quot;+,-,-,+&amp;quot;"></oppia-noninteractive-math>') expected_html_content = ( '<p>Value</p><oppia-noninteractive-math math_content-with-value=' '"{&amp;quot;raw_latex&amp;quot;: &amp;quot;+,-,-,+&amp;quot;, &' 'amp;quot;svg_filename&amp;quot;: &amp;quot;&amp;quot;}"></oppia' '-noninteractive-math>') skill_contents = skill_domain.SkillContents( state_domain.SubtitledHtml( explanation_content_id, feconf.DEFAULT_SKILL_EXPLANATION), [], state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': { explanation_content_id: {} } }), state_domain.WrittenTranslations.from_dict({ 'translations_mapping': { explanation_content_id: {} } })) model = skill_models.SkillModel( id='skill_id', description='description', language_code='en', misconceptions=[{ 'id': 1, 'name': 'name', 'notes': html_content, 'feedback': html_content }], rubrics=[], skill_contents=skill_contents.to_dict(), next_misconception_id=2, misconceptions_schema_version=1, rubric_schema_version=1, skill_contents_schema_version=1, all_questions_merged=False ) commit_cmd_dicts = [commit_cmd.to_dict()] model.commit( 'user_id_admin', 'skill model created', commit_cmd_dicts) current_schema_version_swap = self.swap( feconf, 'CURRENT_MISCONCEPTIONS_SCHEMA_VERSION', 5) with current_schema_version_swap: skill = skill_fetchers.get_skill_from_model(model) self.assertEqual(skill.misconceptions_schema_version, 5) self.assertEqual(skill.misconceptions[0].must_be_addressed, True) self.assertEqual(skill.misconceptions[0].notes, expected_html_content) self.assertEqual( skill.misconceptions[0].feedback, expected_html_content) def test_migrate_rubrics_to_latest_schema(self): commit_cmd = skill_domain.SkillChange({ 'cmd': skill_domain.CMD_CREATE_NEW }) explanation_content_id = feconf.DEFAULT_SKILL_EXPLANATION_CONTENT_ID html_content = ( '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a' 'mp;quot;+,-,-,+&amp;quot;"></oppia-noninteractive-math>') expected_html_content = ( '<p>Value</p><oppia-noninteractive-math math_content-with-value=' '"{&amp;quot;raw_latex&amp;quot;: &amp;quot;+,-,-,+&amp;quot;, &' 'amp;quot;svg_filename&amp;quot;: &amp;quot;&amp;quot;}"></oppia' '-noninteractive-math>') skill_contents = skill_domain.SkillContents( state_domain.SubtitledHtml( explanation_content_id, feconf.DEFAULT_SKILL_EXPLANATION), [], state_domain.RecordedVoiceovers.from_dict({ 'voiceovers_mapping': { explanation_content_id: {} } }), state_domain.WrittenTranslations.from_dict({ 'translations_mapping': { explanation_content_id: {} } })) model = skill_models.SkillModel( id='skill_id', description='description', language_code='en', misconceptions=[], rubrics=[{ 'difficulty': 'Easy', 'explanations': ['Easy explanation'] }, { 'difficulty': 'Medium', 'explanations': ['Medium explanation'] }, { 'difficulty': 'Hard', 'explanations': ['Hard explanation', html_content] }], skill_contents=skill_contents.to_dict(), next_misconception_id=1, misconceptions_schema_version=1, rubric_schema_version=2, skill_contents_schema_version=2, all_questions_merged=False ) commit_cmd_dicts = [commit_cmd.to_dict()] model.commit( 'user_id_admin', 'skill model created', commit_cmd_dicts) current_schema_version_swap = self.swap( feconf, 'CURRENT_RUBRIC_SCHEMA_VERSION', 5) with 
current_schema_version_swap: skill = skill_fetchers.get_skill_from_model(model) self.assertEqual(skill.rubric_schema_version, 5) self.assertEqual(skill.rubrics[0].difficulty, 'Easy') self.assertEqual(skill.rubrics[0].explanations, ['Easy explanation']) self.assertEqual(skill.rubrics[1].difficulty, 'Medium') self.assertEqual(skill.rubrics[1].explanations, ['Medium explanation']) self.assertEqual(skill.rubrics[2].difficulty, 'Hard') self.assertEqual( skill.rubrics[2].explanations, ['Hard explanation', expected_html_content])
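# --- Illustrative sketch only (NOT Oppia's actual migration code): a
# minimal version of the raw_latex -> math_content rewrite that the
# migration tests above assert. The convert_math_tags name is invented. ---
import re

def convert_math_tags(html):
    """Rewrite old-style math tags into the new math_content format."""
    pattern = (
        r'<oppia-noninteractive-math raw_latex-with-value="'
        r'&amp;quot;(.*?)&amp;quot;"></oppia-noninteractive-math>')
    replacement = (
        '<oppia-noninteractive-math math_content-with-value='
        '"{&amp;quot;raw_latex&amp;quot;: &amp;quot;\\1&amp;quot;, '
        '&amp;quot;svg_filename&amp;quot;: &amp;quot;&amp;quot;}">'
        '</oppia-noninteractive-math>')
    return re.sub(pattern, replacement, html)

old_html = (
    '<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
    'mp;quot;+,-,-,+&amp;quot;"></oppia-noninteractive-math>')
# Produces the expected_html_content string used throughout the tests above.
print(convert_math_tags(old_html))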
oppia/oppia
core/domain/skill_services_test.py
Python
apache-2.0
69,501
# This deliberately raises an exception, because we do not expect it to be # loaded in the unit test - yaml_packages/versioned/2.0 will take precedence. raise Exception("This package.py should never be loaded") # Copyright 2013-2016 Allan Johns. # # This library is free software: you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation, either # version 3 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see <http://www.gnu.org/licenses/>.
cwmartin/rez
src/rez/tests/data/packages/py_packages/versioned/2.0/package.py
Python
lgpl-3.0
905
test_records = [ [{ "doctype": "Item Group", "item_group_name": "_Test Item Group", "parent_item_group": "All Item Groups", "is_group": "No" }], [{ "doctype": "Item Group", "item_group_name": "_Test Item Group Desktops", "parent_item_group": "All Item Groups", "is_group": "No" }], ]
gangadhar-kadam/mtn-erpnext
setup/doctype/item_group/test_item_group.py
Python
agpl-3.0
303
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ImproperlyConfigured

import commonware.log
from rest_framework.permissions import BasePermission, SAFE_METHODS

from access import acl

log = commonware.log.getLogger('mkt.collections')


class CuratorAuthorization(BasePermission):
    """
    Permission class governing ability to interact with Collection-related
    APIs.

    Rules:
    - All users may make GET, HEAD, OPTIONS requests.
    - Users with Collections:Curate may make any request.
    - Users in Collection().curators may make any request using a verb in the
      curator_verbs property.

    Note: rest-framework does not allow for situations where a user fails
    has_permission but passes has_object_permission, so the logic determining
    whether a user is a curator or has the Collections:Curate permission is
    abstracted from those methods and situationally called in each.
    """
    allow_public_safe_requests = True
    curator_verbs = ['POST', 'PUT', 'PATCH']

    def is_public_safe_request(self, request):
        return (self.allow_public_safe_requests and
                request.method in SAFE_METHODS)

    def is_curator_for(self, request, obj):
        if isinstance(request.user, AnonymousUser):
            return False
        return (obj.has_curator(request.user) and
                request.method in self.curator_verbs)

    def has_curate_permission(self, request):
        return acl.action_allowed(request, 'Collections', 'Curate')

    def has_permission(self, request, view):
        if self.is_public_safe_request(request):
            return True
        try:
            obj = view.get_object()
        except ImproperlyConfigured:
            # i.e. We're calling get_object from a non-object view.
            return self.has_curate_permission(request)
        else:
            return (self.has_curate_permission(request) or
                    self.is_curator_for(request, obj))

    def has_object_permission(self, request, view, obj):
        if (self.is_public_safe_request(request) or
                self.has_curate_permission(request)):
            return True
        return self.is_curator_for(request, obj)


class StrictCuratorAuthorization(CuratorAuthorization):
    """
    The same as CuratorAuthorization, with GET / HEAD / OPTIONS requests
    disallowed for unauthorized users.
    """
    allow_public_safe_requests = False
    curator_verbs = CuratorAuthorization.curator_verbs + SAFE_METHODS


class CanBeHeroAuthorization(BasePermission):
    """
    Only users with Collections:Curate can modify the can_be_hero field.
    """
    def has_curate_permission(self, request):
        return CuratorAuthorization().has_curate_permission(request)

    def is_modifying_request(self, request):
        return request.method in ('PUT', 'PATCH', 'POST',)

    def hero_field_modified(self, request):
        if request.method == 'POST' and 'can_be_hero' in request.POST:
            return True
        elif request.method in ('PATCH', 'POST', 'PUT'):
            return (isinstance(request.DATA, dict) and
                    'can_be_hero' in request.DATA.keys())
        return False

    def has_object_permission(self, request, view, obj):
        """
        Returns False if the request is attempting to modify the can_be_hero
        field and the authenticating user does not have the
        Collections:Curate permission.
        """
        return not (not self.has_curate_permission(request) and
                    self.is_modifying_request(request) and
                    self.hero_field_modified(request))
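# --- Hedged, minimal demo (an assumption, not part of this module):
# exercising hero_field_modified() above with a hand-rolled stub request.
# The _StubRequest class is invented purely for illustration. ---
class _StubRequest(object):
    def __init__(self, method, data):
        self.method = method
        self.DATA = data                       # DRF-style parsed body
        self.POST = data if method == 'POST' else {}

_perm = CanBeHeroAuthorization()
# A PATCH that touches can_be_hero counts as a hero-field modification...
assert _perm.hero_field_modified(_StubRequest('PATCH', {'can_be_hero': True}))
# ...while one that leaves it alone does not.
assert not _perm.hero_field_modified(_StubRequest('PATCH', {'name': 'x'}))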
jinankjain/zamboni
mkt/collections/authorization.py
Python
bsd-3-clause
3,647
from pandas.io import gbq


def test_sepsis3_one_row_per_stay_id(dataset, project_id):
    """Verifies one stay_id per row of sepsis-3"""
    query = f"""
    SELECT COUNT(*) AS n
    FROM (
        SELECT stay_id
        FROM {dataset}.sepsis3
        GROUP BY 1
        HAVING COUNT(*) > 1
    ) s
    """
    df = gbq.read_gbq(query, project_id=project_id, dialect="standard")
    n = df.loc[0, 'n']
    assert n == 0, 'sepsis-3 table has more than one row per stay_id'
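# --- Hedged sketch (an assumption, not part of mimic-code): the same
# duplicate-key check generalized to any table/column pair. The helper
# name _assert_unique is invented. ---
def _assert_unique(dataset, project_id, table, column):
    """Fail if `column` is not unique within `dataset.table`."""
    query = f"""
    SELECT COUNT(*) AS n
    FROM (
        SELECT {column}
        FROM {dataset}.{table}
        GROUP BY 1
        HAVING COUNT(*) > 1
    ) s
    """
    df = gbq.read_gbq(query, project_id=project_id, dialect="standard")
    assert df.loc[0, 'n'] == 0, f'{table} has duplicate {column} values'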
MIT-LCP/mimic-code
mimic-iv/tests/test_sepsis.py
Python
mit
481
# TO DO
# Crashes sometimes?
# In Autoplay, input can't tell whether what's entered is a number and could crash
# Add Saving
# Add Play Again?

# Imports (random numbers, graphics, maths, timing and the game logic)
from random import *
from york_graphics import *
from math import *
import time
from logic_module import *

# ------------------------------------ Main ------------------------------------

# Might be overkill to put this in a function, but it makes it easier
# to comment out and avoid the pauses when testing
Grid, LastGrid = InitGrid(Grid, LastGrid)

# New prettier way to print the grid
InitGraphics(Grid, Score)
#PrintGrid(Grid)

AutoMode, AutoTime = StartMenu(AutoMode, ValidMove, AutoTime)

while StillGoing == True:
    # For Autoplay
    GridStill = 0
    ValidMove = False
    while ValidMove == False:
        # The only difference in AutoMode is that it provides a move instead
        # of asking for one, so the move-handling code below is shared
        if AutoMode == False:
            Movement = waitForKeyPress()
            #Movement = input('Enter your move: ')
        else:
            Movement, AutoCount = AutoPlay(Grid, GridStill, AutoCount, AutoTime)
        Grid, Score, ValidMove = MoveGrid(Grid, Movement, Score, ValidMove)
        GridChanged, GridStill = GridDifferent(Grid, LastGrid, GridChanged, GridStill)
        ValidMove = GridChanged
    PlayerWon = GameWon(Grid, PlayerWon)
    StillGoing = not PlayerWon
    if StillGoing == True:
        SpawnNumber(Grid)
        LastGrid, SpaceChanged = SpaceDifferent(Grid, LastGrid, SpaceChanged)
        PlayerLost = NoMoves(Grid, PlayerLost)
        StillGoing = not PlayerLost
    DrawGrid(Grid, Score, SpaceChanged)
    #PrintGrid(SpaceChanged)

print()
if PlayerWon:
    print('Congratulations, you win!')
else:
    print('Sorry, you lose!')
print('You Scored: ' + str(Score))
Georgeleeh/2048-Clone
Main.py
Python
mit
2,109
import sqlalchemy as sql def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine token = sql.Table('token', meta, autoload=True) idx = sql.Index('ix_token_expires', token.c.expires) idx.create(migrate_engine) def downgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine token = sql.Table('token', meta, autoload=True) idx = sql.Index('ix_token_expires', token.c.expires) idx.drop(migrate_engine)
kwss/keystone
keystone/common/sql/migrate_repo/versions/024_add_index_to_expires.py
Python
apache-2.0
481
from django.conf.urls import url from backstage.email.views import EmailCreateView, EmailUpdateView, EmailAddRecipient, EmailDeleteRecipient, \ EmailPreview, EmailSend, EmailRecipientErrorReport from backstage.email.views import EmailList from .account.views import AccountList, AccountPrivilegeSwitch, AccountPasswordChange, AccountPolygonSwitch, \ AccountActiveSwitch, \ AccountSchoolList, AccountAddSchool, AccountEditSchool from .base_views import Index from .blog.views import BlogList, BlogRecommendSwitch, BlogVisibleSwitch from .contest.views import ContestList, ContestApplyRatingChanges, ContestWithdrawRatingChanges, \ ApplyGlobalChangesToRating from .problem.views import ProblemList, ProblemVisibleSwitch, ProblemTagList, ProblemTagCreate, ProblemTagEdit, \ ProblemArchiveList, ProblemArchiveEdit, ProblemArchiveCreate, ProblemSourceBatchEdit, ProblemTagDelete from .server.views import ServerCreate, ServerUpdate, ServerList, ServerDelete, ServerRefresh, ServerEnableOrDisable, \ ServerUpdateToken, ServerSynchronize, ServerProblemStatusList, ServerSemaphoreReset, RejudgeAllCrashedSubmission from .site.views import SiteSettingsUpdate app_name = "backstage" urlpatterns = [ url(r'^$', Index.as_view(), name='index'), url(r'^account/$', AccountList.as_view(), name='account'), url(r'^account/privilege/(?P<pk>\d+)/$', AccountPrivilegeSwitch.as_view(), name='account_privilege_switch'), url(r'^account/polygon/(?P<pk>\d+)/$', AccountPolygonSwitch.as_view(), name='account_polygon_switch'), url(r'^account/password/(?P<pk>\d+)/$', AccountPasswordChange.as_view(), name='account_password_change'), url(r'^account/active/(?P<pk>\d+)/$', AccountActiveSwitch.as_view(), name='account_active_switch'), url(r'^account/school/$', AccountSchoolList.as_view(), name='account_school'), url(r'^account/school/add/$', AccountAddSchool.as_view(), name='account_school_add'), url(r'^account/school/(?P<pk>\d+)/edit/$', AccountEditSchool.as_view(), name='account_school_edit'), url(r'^problem/$', ProblemList.as_view(), name='problem'), url(r'^problem/(?P<pk>\d+)/visible/$', ProblemVisibleSwitch.as_view(), name='problem_visible_switch'), url(r'^tags/$', ProblemTagList.as_view(), name='tags'), url(r'^tags/add/$', ProblemTagCreate.as_view(), name='tag_add'), url(r'^tags/(?P<pk>\d+)/edit/$', ProblemTagEdit.as_view(), name='tag_edit'), url(r'^tags/(?P<pk>\d+)/delete/$', ProblemTagDelete.as_view(), name='tag_delete'), url(r'^archive/$', ProblemArchiveList.as_view(), name='archive'), url(r'^archive/add/$', ProblemArchiveCreate.as_view(), name='archive_add'), url(r'^archive/(?P<pk>\d+)/edit/$', ProblemArchiveEdit.as_view(), name='archive_edit'), url(r'^problem/source/$', ProblemSourceBatchEdit.as_view(), name='source_edit'), url(r'^contest/$', ContestList.as_view(), name='contest'), url(r'^contest/(?P<cid>\d+)/ratings/apply/$', ContestApplyRatingChanges.as_view(), name='contest_apply_ratings'), url(r'^contest/(?P<cid>\d+)/ratings/withdraw/$', ContestWithdrawRatingChanges.as_view(), name='contest_withdraw_ratings'), url(r'^contest/ratings/global/$', ApplyGlobalChangesToRating.as_view(), name='contest_apply_ratings_global'), url(r'^server/$', ServerList.as_view(), name='server'), url(r'^server/create/$', ServerCreate.as_view(), name='server_create'), url(r'^server/(?P<pk>\d+)/edit/$', ServerUpdate.as_view(), name='server_edit'), url(r'^server/(?P<pk>\d+)/delete/$', ServerDelete.as_view(), name='server_delete'), url(r'^server/(?P<pk>\d+)/refresh/$', ServerRefresh.as_view(), name='server_refresh'), url(r'^server/(?P<pk>\d+)/enable/$', 
ServerEnableOrDisable.as_view(), name='server_enable'), url(r'^server/(?P<pk>\d+)/edit/token/$', ServerUpdateToken.as_view(), name='server_update_token'), url(r'^server/(?P<pk>\d+)/status/$', ServerProblemStatusList.as_view(), name='server_problem_status'), url(r'^server/(?P<pk>\d+)/synchronize/$', ServerSynchronize.as_view(), name='server_synchronize'), url(r'^server/semaphore/reset/$', ServerSemaphoreReset.as_view(), name='server_semaphore_reset'), url(r'^server/rejudge/crashed/$', RejudgeAllCrashedSubmission.as_view(), name='rejudge_crashed_submission'), url(r'^site/$', SiteSettingsUpdate.as_view(), name='site'), url(r'^blog/$', BlogList.as_view(), name='blog'), url(r'^blog/(?P<pk>\d+)/visible/$', BlogVisibleSwitch.as_view(), name='blog_visible_switch'), url(r'^blog/(?P<pk>\d+)/recommend/$', BlogRecommendSwitch.as_view(), name='blog_recommend_switch'), url(r'^email/$', EmailList.as_view(), name='email'), url(r'^email/create/$', EmailCreateView.as_view(), name='email_create'), url(r'^email/(?P<eid>\d+)/update/$', EmailUpdateView.as_view(), name='email_update'), url(r'^email/(?P<eid>\d+)/recipient/add/$', EmailAddRecipient.as_view(), name='email_recipient_add'), url(r'^email/recipient/(?P<pk>\d+)/error/$', EmailRecipientErrorReport.as_view(), name='email_recipient_error'), url(r'^email/recipient/(?P<pk>\d+)/delete/$', EmailDeleteRecipient.as_view(), name='email_recipient_delete'), url(r'^email/(?P<eid>\d+)/preview/$', EmailPreview.as_view(), name='email_preview'), url(r'^email/(?P<eid>\d+)/send/$', EmailSend.as_view(), name='email_send'), ]
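# --- Hedged usage sketch (not in the original file): reversing one of the
# namespaced routes above. The exact path prefix depends on where
# backstage.urls is included in the project's root urlconf. ---
from django.urls import reverse

edit_url = reverse('backstage:tag_edit', kwargs={'pk': 1})
# e.g. '/backstage/tags/1/edit/' when included under a 'backstage/' prefix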
ultmaster/eoj3
backstage/urls.py
Python
mit
5,245
#!/usr/bin/env python2.7 # encoding: utf-8 """Fastavro decoding benchmark.""" from io import BytesIO from itertools import repeat from time import time from fastavro import dump, load, acquaint_schema, reader as avro_reader import sys LOOPS = 2 with open(sys.argv[1]) as reader: records = avro_reader(reader) SCHEMA = records.schema BUFS = [] for record in records: buf = BytesIO() dump(buf, record, SCHEMA) BUFS.append(buf) start = time() n = 0 for _ in repeat(None, LOOPS): for buf in BUFS: n += 1 buf.seek(0) record = load(buf, SCHEMA) print 1000. * (time() - start) / n
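# --- Hedged add-on (not part of the original benchmark): timing the encode
# side with the same dump() call the script already uses. The records are
# first re-decoded into a list, since the avro_reader stream is exhausted. ---
RECORDS = []
for buf in BUFS:
    buf.seek(0)
    RECORDS.append(load(buf, SCHEMA))

start = time()
n = 0
for _ in repeat(None, LOOPS):
    for record in RECORDS:
        n += 1
        dump(BytesIO(), record, SCHEMA)

print 1000. * (time() - start) / n  # average encode time, ms per record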
mtth/avsc
etc/benchmarks/avro-serialization-implementations/scripts/decode/python-fastavro.py
Python
mit
614
""" Copyright (c) 2012-2020 RockStor, Inc. <http://rockstor.com> This file is part of RockStor. RockStor is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. RockStor is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from django.db import models from storageadmin.models import Pool class PoolScrub(models.Model): pool = models.ForeignKey(Pool) # with a max of 10 chars we use 'halted' to indicated 'interrupted' status = models.CharField(max_length=10, default="started") # pid is the process id of a scrub job pid = models.IntegerField() start_time = models.DateTimeField(auto_now=True) end_time = models.DateTimeField(null=True) time_left = models.BigIntegerField(default=0) eta = models.DateTimeField(null=True) rate = models.CharField(max_length=15, default="") kb_scrubbed = models.BigIntegerField(null=True) data_extents_scrubbed = models.BigIntegerField(default=0) tree_extents_scrubbed = models.BigIntegerField(default=0) tree_bytes_scrubbed = models.BigIntegerField(default=0) read_errors = models.IntegerField(default=0) csum_errors = models.IntegerField(default=0) verify_errors = models.IntegerField(default=0) no_csum = models.IntegerField(default=0) csum_discards = models.IntegerField(default=0) super_errors = models.IntegerField(default=0) malloc_errors = models.IntegerField(default=0) uncorrectable_errors = models.IntegerField(default=0) unverified_errors = models.IntegerField(default=0) corrected_errors = models.IntegerField(default=0) last_physical = models.BigIntegerField(default=0) class Meta: app_label = "storageadmin"
phillxnet/rockstor-core
src/rockstor/storageadmin/models/scrub.py
Python
gpl-3.0
2,153
''' Given a binary tree, find its maximum depth. The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node. ''' # Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution(object): def maxDepth(self, root): """ :type root: TreeNode :rtype: int """ if root is None: return 0 return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
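# --- Quick illustrative self-check (TreeNode simply mirrors the commented
# definition above). ---
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

# Tree:  3            The longest root-to-leaf path has 3 nodes,
#       / \           so the maximum depth is 3.
#      9  20
#         / \
#        15  7
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)
assert Solution().maxDepth(root) == 3
assert Solution().maxDepth(None) == 0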
wufangjie/leetcode
104. Maximum Depth of Binary Tree.py
Python
gpl-3.0
590