code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_pearson_phi.
This module contains unit tests for abydos.distance.PearsonPhi
"""
import unittest
from abydos.distance import PearsonPhi
class PearsonPhiTestCases(unittest.TestCase):
    """Test PearsonPhi functions.
    abydos.distance.PearsonPhi
    """

    # Shared fixtures: the default measure, and one with alphabet=0, which
    # drops the "absent from both strings" term from the contingency table.
    cmp = PearsonPhi()
    cmp_no_d = PearsonPhi(alphabet=0)
    def test_pearson_phi_sim(self):
        """Test abydos.distance.PearsonPhi.sim."""
        # Base cases
        # sim is corr rescaled from [-1, 1] to [0, 1]: sim = (1 + corr) / 2,
        # so uncorrelated comparisons (one empty string) score 0.5.
        self.assertEqual(self.cmp.sim('', ''), 1.0)
        self.assertEqual(self.cmp.sim('a', ''), 0.5)
        self.assertEqual(self.cmp.sim('', 'a'), 0.5)
        self.assertEqual(self.cmp.sim('abc', ''), 0.5)
        self.assertEqual(self.cmp.sim('', 'abc'), 0.5)
        self.assertEqual(self.cmp.sim('abc', 'abc'), 1.0)
        self.assertEqual(self.cmp.sim('abcd', 'efgh'), 0.496790757381258)
        self.assertAlmostEqual(self.cmp.sim('Nigel', 'Niall'), 0.7480719794)
        self.assertAlmostEqual(self.cmp.sim('Niall', 'Nigel'), 0.7480719794)
        self.assertAlmostEqual(self.cmp.sim('Colin', 'Coiln'), 0.7480719794)
        self.assertAlmostEqual(self.cmp.sim('Coiln', 'Colin'), 0.7480719794)
        self.assertAlmostEqual(
            self.cmp.sim('ATCAACGAGT', 'AACGATTAG'), 0.8314623708
        )
        # Tests with alphabet=0 (no d factor)
        self.assertEqual(self.cmp_no_d.sim('', ''), 1.0)
        self.assertEqual(self.cmp_no_d.sim('a', ''), 0.5)
        self.assertEqual(self.cmp_no_d.sim('', 'a'), 0.5)
        self.assertEqual(self.cmp_no_d.sim('abc', ''), 0.5)
        self.assertEqual(self.cmp_no_d.sim('', 'abc'), 0.5)
        self.assertEqual(self.cmp_no_d.sim('abc', 'abc'), 1.0)
        self.assertEqual(self.cmp_no_d.sim('abcd', 'efgh'), 0.0)
        self.assertAlmostEqual(self.cmp_no_d.sim('Nigel', 'Niall'), 0.25)
        self.assertAlmostEqual(self.cmp_no_d.sim('Niall', 'Nigel'), 0.25)
        self.assertAlmostEqual(self.cmp_no_d.sim('Colin', 'Coiln'), 0.25)
        self.assertAlmostEqual(self.cmp_no_d.sim('Coiln', 'Colin'), 0.25)
        self.assertAlmostEqual(
            self.cmp_no_d.sim('ATCAACGAGT', 'AACGATTAG'), 0.3348554352
        )
    def test_pearson_phi_dist(self):
        """Test abydos.distance.PearsonPhi.dist."""
        # Base cases
        # dist is the complement of sim (dist = 1 - sim), as the paired
        # expected values below demonstrate.
        self.assertEqual(self.cmp.dist('', ''), 0.0)
        self.assertEqual(self.cmp.dist('a', ''), 0.5)
        self.assertEqual(self.cmp.dist('', 'a'), 0.5)
        self.assertEqual(self.cmp.dist('abc', ''), 0.5)
        self.assertEqual(self.cmp.dist('', 'abc'), 0.5)
        self.assertEqual(self.cmp.dist('abc', 'abc'), 0.0)
        self.assertEqual(self.cmp.dist('abcd', 'efgh'), 0.503209242618742)
        self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 0.2519280206)
        self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 0.2519280206)
        self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 0.2519280206)
        self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 0.2519280206)
        self.assertAlmostEqual(
            self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.1685376292
        )
        # Tests with alphabet=0 (no d factor)
        self.assertEqual(self.cmp_no_d.dist('', ''), 0.0)
        self.assertEqual(self.cmp_no_d.dist('a', ''), 0.5)
        self.assertEqual(self.cmp_no_d.dist('', 'a'), 0.5)
        self.assertEqual(self.cmp_no_d.dist('abc', ''), 0.5)
        self.assertEqual(self.cmp_no_d.dist('', 'abc'), 0.5)
        self.assertEqual(self.cmp_no_d.dist('abc', 'abc'), 0.0)
        self.assertEqual(self.cmp_no_d.dist('abcd', 'efgh'), 1.0)
        self.assertAlmostEqual(self.cmp_no_d.dist('Nigel', 'Niall'), 0.75)
        self.assertAlmostEqual(self.cmp_no_d.dist('Niall', 'Nigel'), 0.75)
        self.assertAlmostEqual(self.cmp_no_d.dist('Colin', 'Coiln'), 0.75)
        self.assertAlmostEqual(self.cmp_no_d.dist('Coiln', 'Colin'), 0.75)
        self.assertAlmostEqual(
            self.cmp_no_d.dist('ATCAACGAGT', 'AACGATTAG'), 0.6651445648
        )
    def test_pearson_phi_corr(self):
        """Test abydos.distance.PearsonPhi.corr."""
        # Base cases
        # corr is the raw phi coefficient in [-1, 1]; the sim values above
        # equal (1 + corr) / 2 for the same string pairs.
        self.assertEqual(self.cmp.corr('', ''), 1.0)
        self.assertEqual(self.cmp.corr('a', ''), 0.0)
        self.assertEqual(self.cmp.corr('', 'a'), 0.0)
        self.assertEqual(self.cmp.corr('abc', ''), 0.0)
        self.assertEqual(self.cmp.corr('', 'abc'), 0.0)
        self.assertEqual(self.cmp.corr('abc', 'abc'), 1.0)
        self.assertEqual(self.cmp.corr('abcd', 'efgh'), -0.006418485237483954)
        self.assertAlmostEqual(self.cmp.corr('Nigel', 'Niall'), 0.4961439589)
        self.assertAlmostEqual(self.cmp.corr('Niall', 'Nigel'), 0.4961439589)
        self.assertAlmostEqual(self.cmp.corr('Colin', 'Coiln'), 0.4961439589)
        self.assertAlmostEqual(self.cmp.corr('Coiln', 'Colin'), 0.4961439589)
        self.assertAlmostEqual(
            self.cmp.corr('ATCAACGAGT', 'AACGATTAG'), 0.6629247416
        )
        # Tests with alphabet=0 (no d factor)
        self.assertEqual(self.cmp_no_d.corr('', ''), 1.0)
        self.assertEqual(self.cmp_no_d.corr('a', ''), 0.0)
        self.assertEqual(self.cmp_no_d.corr('', 'a'), 0.0)
        self.assertEqual(self.cmp_no_d.corr('abc', ''), 0.0)
        self.assertEqual(self.cmp_no_d.corr('', 'abc'), 0.0)
        self.assertEqual(self.cmp_no_d.corr('abc', 'abc'), 1.0)
        self.assertEqual(self.cmp_no_d.corr('abcd', 'efgh'), -1.0)
        self.assertAlmostEqual(self.cmp_no_d.corr('Nigel', 'Niall'), -0.5)
        self.assertAlmostEqual(self.cmp_no_d.corr('Niall', 'Nigel'), -0.5)
        self.assertAlmostEqual(self.cmp_no_d.corr('Colin', 'Coiln'), -0.5)
        self.assertAlmostEqual(self.cmp_no_d.corr('Coiln', 'Colin'), -0.5)
        self.assertAlmostEqual(
            self.cmp_no_d.corr('ATCAACGAGT', 'AACGATTAG'), -0.3302891295
        )
if __name__ == '__main__':
    unittest.main()
| chrislit/abydos | tests/distance/test_distance_pearson_phi.py | Python | gpl-3.0 | 6,576 |
import os, sys
from optparse import make_option
from django.contrib.gis import gdal
from django.contrib.gis.management.base import ArgsCommand, CommandError
def layer_option(option, opt, value, parser):
    """
    `optparse` callback for the `ogrinspect` `layer_key` option, which
    accepts either an integer layer index or a string layer identifier.
    Stores an int when the value parses as one, otherwise the raw string.
    """
    try:
        setattr(parser.values, option.dest, int(value))
    except ValueError:
        setattr(parser.values, option.dest, value)
def list_option(option, opt, value, parser):
    """
    `optparse` callback for `ogrinspect` options that take a comma-separated
    string list. The literal value 'true' (any case) instead stores the
    boolean True, meaning "apply to all applicable fields".
    """
    parsed = True if value.lower() == 'true' else value.split(',')
    setattr(parser.values, option.dest, parsed)
class Command(ArgsCommand):
    """Management command: inspect an OGR data source (e.g. a shapefile)
    and emit a GeoDjango model definition for it, optionally together with
    a `LayerMapping` dictionary."""
    help = ('Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
            'a GeoDjango model with the given model name. For example:\n'
            ' ./manage.py ogrinspect zipcode.shp Zipcode')
    args = '[data_source] [model_name]'
    option_list = ArgsCommand.option_list + (
        make_option('--blank', dest='blank', type='string', action='callback',
                    callback=list_option, default=False,
                    help='Use a comma separated list of OGR field names to add '
                    'the `blank=True` option to the field definition. Set with '
                    '`true` to apply to all applicable fields.'),
        make_option('--decimal', dest='decimal', type='string', action='callback',
                    callback=list_option, default=False,
                    help='Use a comma separated list of OGR float fields to '
                    'generate `DecimalField` instead of the default '
                    '`FloatField`. Set to `true` to apply to all OGR float fields.'),
        make_option('--geom-name', dest='geom_name', type='string', default='geom',
                    help='Specifies the model name for the Geometry Field '
                    '(defaults to `geom`)'),
        make_option('--layer', dest='layer_key', type='string', action='callback',
                    callback=layer_option, default=0,
                    help='The key for specifying which layer in the OGR data '
                    'source to use. Defaults to 0 (the first layer). May be '
                    'an integer or a string identifier for the layer.'),
        make_option('--multi-geom', action='store_true', dest='multi_geom', default=False,
                    help='Treat the geometry in the data source as a geometry collection.'),
        make_option('--name-field', dest='name_field',
                    help='Specifies a field name to return for the `__unicode__` function.'),
        make_option('--no-imports', action='store_false', dest='imports', default=True,
                    help='Do not include `from django.contrib.gis.db import models` '
                    'statement.'),
        make_option('--null', dest='null', type='string', action='callback',
                    callback=list_option, default=False,
                    help='Use a comma separated list of OGR field names to add '
                    'the `null=True` option to the field definition. Set with '
                    '`true` to apply to all applicable fields.'),
        make_option('--srid', dest='srid',
                    help='The SRID to use for the Geometry Field. If it can be '
                    'determined, the SRID of the data source is used.'),
        make_option('--mapping', action='store_true', dest='mapping',
                    help='Generate mapping dictionary for use with `LayerMapping`.')
    )
    requires_model_validation = False
    def handle_args(self, *args, **options):
        """Validate the arguments, open the OGR data source and return the
        generated model (plus mapping dict, when requested) as a string.

        Raises CommandError on bad arguments, missing GDAL, a missing file,
        or an OGR-level failure while opening the source."""
        try:
            data_source, model_name = args
        except ValueError:
            raise CommandError('Invalid arguments, must provide: %s' % self.args)
        if not gdal.HAS_GDAL:
            raise CommandError('GDAL is required to inspect geospatial data sources.')
        # TODO: Support non file-based OGR datasources.
        if not os.path.isfile(data_source):
            raise CommandError('The given data source cannot be found: "%s"' % data_source)
        # Removing options with `None` values (was the non-idiomatic
        # `not v is None`).
        options = dict([(k, v) for k, v in options.items() if v is not None])
        # Getting the OGR DataSource from the string parameter.
        try:
            ds = gdal.DataSource(data_source)
        # `except X, msg` was Python 2-only syntax; `as` works on 2.6+ and 3.x.
        except gdal.OGRException as msg:
            raise CommandError(msg)
        # Whether the user wants to generate the LayerMapping dictionary as well.
        show_mapping = options.pop('mapping', False)
        # Returning the output of ogrinspect with the given arguments
        # and options.
        from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
        output = [s for s in _ogrinspect(ds, model_name, **options)]
        if show_mapping:
            # Constructing the keyword arguments for `mapping`, and
            # calling it on the data source.
            kwargs = {'geom_name' : options['geom_name'],
                      'layer_key' : options['layer_key'],
                      'multi_geom' : options['multi_geom'],
                      }
            mapping_dict = mapping(ds, **kwargs)
            # This extra legwork is so that the dictionary definition comes
            # out in the same order as the fields in the model definition.
            rev_mapping = dict([(v, k) for k, v in mapping_dict.items()])
            output.extend(['', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
                           '%s_mapping = {' % model_name.lower()])
            output.extend([" '%s' : '%s'," % (rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields])
            output.extend([" '%s' : '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
        return '\n'.join(output)
| Shrews/PyGerrit | webapp/django/contrib/gis/management/commands/ogrinspect.py | Python | apache-2.0 | 6,113 |
# -*- coding: utf-8 -*-
from openerp import models,fields, _
"""
Este modulo crea el modelo Usuario
"""
#Se crea la clase Usuario
class Usuario(models.Model):
    # Extends the core res.users model in place rather than creating a new table.
    _inherit = 'res.users'
    # Optional display alias for the user.
    alias = fields.Char()
    # Equipment (equipment.control) records assigned to this user.
    equipos_ids = fields.Many2many("equipment.control",
        ondelete='set null',string="Equipos", index=True)
| andyrgtz/Proyecto-Triples | computer_equipment_control/model/usuario.py | Python | apache-2.0 | 359 |
class Solution:
    """Arbitrary-precision multiplication of non-negative decimal strings.

    Digits are handled as little-endian lists of ints (least significant
    digit first) so that carries propagate from index 0 upward.

    Fixes over the previous version: the padding no longer relies on
    `itertools` (which was never imported in this module), and all digit
    divisions use floor division `//` so the code is correct on Python 3,
    where `/` yields floats.
    """

    def add(self, num1, num2):
        """Add two little-endian digit lists; returns the digit-list sum.

        Note: pads both input lists in place (same behavior as before).
        """
        result = []
        carry = 0
        max_len = max(len(num1), len(num2))
        num1.extend([0] * (max_len - len(num1)))
        num2.extend([0] * (max_len - len(num2)))
        for a, b in zip(num1, num2):
            total = a + b + carry
            result.append(total % 10)
            carry = total // 10  # floor division keeps digits integral
        if carry:
            result.append(carry)
        return result

    def multiply_digit(self, num, digit, place):
        """Multiply digit list `num` by one `digit`, shifted left by `place`."""
        res = [0] * place
        carry = 0
        for d in num:
            prod = digit * d + carry
            res.append(prod % 10)
            carry = prod // 10
        if carry:
            res.append(carry)
        return res

    def multiply_numbers(self, num1, num2):
        """Schoolbook long multiplication over little-endian digit lists."""
        res = [0]
        for place, d in enumerate(num2):
            res = self.add(res, self.multiply_digit(num1, d, place))
        return res

    def multiply(self, num1, num2):
        """Return the decimal-string product of two decimal strings."""
        if num1 == '0' or num2 == '0':
            return '0'
        digits1 = list(map(int, reversed(num1)))
        digits2 = list(map(int, reversed(num2)))
        product = self.multiply_numbers(digits1, digits2)
        return ''.join(map(str, reversed(product)))
| rahul-ramadas/leetcode | multiply-strings/Solution.9260967.py | Python | mit | 1,343 |
"""
Copyright 2013 Lyst Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Sampling strategies for the training loop.
LOOP_BALANCED_STOCHASTIC = 1
LOOP_STOCHASTIC = 2
# Output transform applied at prediction time.
PREDICTION_LINEAR = 1
PREDICTION_LOGISTIC = 2
# Available Pegasos learner variants (hinge-loss SVM vs. logistic regression).
LEARNER_PEGASOS_SVM = 1
LEARNER_PEGASOS_LOGREG = 2
# defaults for hyperparameters
DFLT_ITERATIONS = 100000
DFLT_LAMBDA_REG = 0.1
# make sure we protect against lambda * eta > 1.0 which
# causes numerical issues for regularization and projection
MIN_SCALING_FACTOR = 0.0000001
MIN_SCALE = 0.00000000001
| aalto-ics-kepaco/softALIGNF | svm_code/pegasos/pegasos/constants.py | Python | apache-2.0 | 1,002 |
from gaphor.abc import Service
class Session(Service):
    """Application service that proxies service lookups to the
    application's currently active session."""

    def __init__(self, application):
        self.application = application

    def shutdown(self):
        """Nothing to release on shutdown."""

    def get_service(self, name):
        active_session = self.application.active_session
        assert active_session
        return active_session.get_service(name)
| amolenaar/gaphor | gaphor/services/session.py | Python | lgpl-2.1 | 383 |
from .parser import *
| zachwalton/truebpm | simfile/__init__.py | Python | mit | 22 |
#!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from os import path
# Fail fast with an actionable message if the compiled ycm_core extension
# (which links against libclang) cannot be loaded.
try:
  import ycm_core
except ImportError as e:
  raise RuntimeError(
    'Error importing ycm_core. Are you sure you have placed a '
    'version 3.2+ libclang.[so|dll|dylib] in folder "{0}"? '
    'See the Installation Guide in the docs. Full error: {1}'.format(
      path.realpath( path.join( path.abspath( __file__ ), '../..' ) ),
      str( e ) ) )
import atexit
import logging
import json
import bottle
import httplib
from bottle import request, response
import server_state
from ycmd import user_options_store
from ycmd.responses import BuildExceptionResponse, BuildCompletionResponse
from ycmd import hmac_plugin
from ycmd import extra_conf_store
from ycmd.request_wrap import RequestWrap
# num bytes for the request body buffer; request.json only works if the request
# size is less than this
bottle.Request.MEMFILE_MAX = 1000 * 1024
# Module-level server state; populated by UpdateUserOptions() /
# SetServerStateToDefaults() before the app starts serving requests.
_server_state = None
# Shared secret used to HMAC-sign responses (set via SetHmacSecret).
_hmac_secret = None
_logger = logging.getLogger( __name__ )
app = bottle.Bottle()
@app.post( '/event_notification' )
def EventNotification():
  """Dispatch an editor event to the general completer and, when one is
  usable for the request's filetypes, to the filetype completer as well.
  Returns the filetype completer's response, or an empty JSON object."""
  _logger.info( 'Received event notification' )
  request_data = RequestWrap( request.json )
  event_name = request_data[ 'event_name' ]
  _logger.debug( 'Event name: %s', event_name )
  # e.g. event_name 'BufferVisit' dispatches to the completer's OnBufferVisit.
  event_handler = 'On' + event_name
  getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )
  filetypes = request_data[ 'filetypes' ]
  response_data = None
  if _server_state.FiletypeCompletionUsable( filetypes ):
    response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),
                             event_handler )( request_data )
  if response_data:
    return _JsonResponse( response_data )
  return _JsonResponse( {} )
@app.post( '/run_completer_command' )
def RunCompleterCommand():
  """Run a completer subcommand (e.g. GoToDefinition) for the request's
  target completer and return its JSON result."""
  _logger.info( 'Received command request' )
  request_data = RequestWrap( request.json )
  completer = _GetCompleterForRequestData( request_data )
  return _JsonResponse( completer.OnUserCommand(
    request_data[ 'command_arguments' ],
    request_data ) )
@app.post( '/completions' )
def GetCompletions():
  """Compute completion candidates, preferring the filetype (semantic)
  completer when the server decides it should be used for this request."""
  _logger.info( 'Received completion request' )
  request_data = RequestWrap( request.json )
  do_filetype_completion = _server_state.ShouldUseFiletypeCompleter(
    request_data )
  _logger.debug( 'Using filetype completion: %s', do_filetype_completion )
  filetypes = request_data[ 'filetypes' ]
  completer = ( _server_state.GetFiletypeCompleter( filetypes ) if
                do_filetype_completion else
                _server_state.GetGeneralCompleter() )
  return _JsonResponse( BuildCompletionResponse(
    completer.ComputeCandidates( request_data ),
    request_data.CompletionStartColumn() ) )
@app.get( '/healthy' )
def GetHealthy():
  """Liveness check; with ?include_subservers, also checks that the C#
  completer's subserver process is running."""
  _logger.info( 'Received health request' )
  if request.query.include_subservers:
    cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )
    return _JsonResponse( cs_completer.ServerIsRunning() )
  return _JsonResponse( True )
@app.get( '/ready' )
def GetReady():
  """Readiness check; with ?include_subservers, also checks that the C#
  completer's subserver is ready to accept requests."""
  _logger.info( 'Received ready request' )
  if request.query.include_subservers:
    cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )
    return _JsonResponse( cs_completer.ServerIsReady() )
  return _JsonResponse( True )
@app.post( '/semantic_completion_available' )
def FiletypeCompletionAvailable():
  """Report whether a semantic completer exists for the given filetypes."""
  _logger.info( 'Received filetype completion available request' )
  return _JsonResponse( _server_state.FiletypeCompletionAvailable(
      RequestWrap( request.json )[ 'filetypes' ] ) )
@app.post( '/defined_subcommands' )
def DefinedSubcommands():
  """List the subcommand names supported by the request's completer."""
  _logger.info( 'Received defined subcommands request' )
  completer = _GetCompleterForRequestData( RequestWrap( request.json ) )
  return _JsonResponse( completer.DefinedSubcommands() )
@app.post( '/detailed_diagnostic' )
def GetDetailedDiagnostic():
  """Return the full diagnostic text for the cursor location."""
  _logger.info( 'Received detailed diagnostic request' )
  request_data = RequestWrap( request.json )
  completer = _GetCompleterForRequestData( request_data )
  return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )
@app.post( '/load_extra_conf_file' )
def LoadExtraConfFile():
  """Force-load an .ycm_extra_conf.py file, bypassing the usual
  user-confirmation step (validate=False: only 'filepath' is required)."""
  _logger.info( 'Received extra conf load request' )
  request_data = RequestWrap( request.json, validate = False )
  extra_conf_store.Load( request_data[ 'filepath' ], force = True )
@app.post( '/ignore_extra_conf_file' )
def IgnoreExtraConfFile():
  """Blacklist an .ycm_extra_conf.py file so it is never loaded."""
  _logger.info( 'Received extra conf ignore request' )
  request_data = RequestWrap( request.json, validate = False )
  extra_conf_store.Disable( request_data[ 'filepath' ] )
@app.post( '/debug_info' )
def DebugInfo():
  """Return a human-readable description of the server's capabilities
  (Clang support/version) and, when available, the active completer's
  own debug information."""
  _logger.info( 'Received debug info request' )
  output = []
  has_clang_support = ycm_core.HasClangSupport()
  output.append( 'Server has Clang support compiled in: {0}'.format(
    has_clang_support ) )
  if has_clang_support:
    output.append( 'Clang version: ' + ycm_core.ClangVersion() )
  request_data = RequestWrap( request.json )
  try:
    output.append(
      _GetCompleterForRequestData( request_data ).DebugInfo( request_data ) )
  except Exception:
    # Completer debug info is best-effort, but the previous bare `except:`
    # also swallowed SystemExit/KeyboardInterrupt and hid all failures;
    # catch only real errors and record them at debug level.
    _logger.debug( 'Exception while retrieving completer debug info',
                   exc_info = True )
  return _JsonResponse( '\n'.join( output ) )
# The type of the param is Bottle.HTTPError
@app.error( httplib.INTERNAL_SERVER_ERROR )
def ErrorHandler( httperror ):
  """Serialize an unhandled exception into the standard ycmd exception
  response, HMAC-signed like every other response."""
  body = _JsonResponse( BuildExceptionResponse( httperror.exception,
                                                httperror.traceback ) )
  hmac_plugin.SetHmacHeader( body, _hmac_secret )
  return body
def _JsonResponse( data ):
  """Serialize `data` to JSON and set the response content type;
  arbitrary objects are handled by the _UniversalSerialize fallback."""
  response.set_header( 'Content-Type', 'application/json' )
  return json.dumps( data, default = _UniversalSerialize )
def _UniversalSerialize( obj ):
try:
serialized = obj.__dict__.copy()
serialized[ 'TYPE' ] = type( obj ).__name__
return serialized
except AttributeError:
return str( obj )
def _GetCompleterForRequestData( request_data ):
  """Pick the completer the request targets: the identifier completer, the
  default completer for the request's filetypes, or a completer named
  explicitly via 'completer_target'."""
  target = request_data.get( 'completer_target', None )
  if target == 'identifier':
    return _server_state.GetGeneralCompleter().GetIdentifierCompleter()
  if not target or target == 'filetype_default':
    return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )
  return _server_state.GetFiletypeCompleter( [ target ] )
@atexit.register
def ServerShutdown():
  """Process-exit hook: shut down the server state and extra-conf store."""
  _logger.info( 'Server shutting down' )
  if _server_state:
    _server_state.Shutdown()
    extra_conf_store.Shutdown()
def SetHmacSecret( hmac_secret ):
  """Install the shared secret used to sign responses."""
  global _hmac_secret
  _hmac_secret = hmac_secret
def UpdateUserOptions( options ):
  """Replace the global user options and rebuild the server state from them."""
  global _server_state
  if not options:
    return
  # This should never be passed in, but let's try to remove it just in case.
  options.pop( 'hmac_secret', None )
  user_options_store.SetAll( options )
  _server_state = server_state.ServerState( options )
def SetServerStateToDefaults():
  """Reset options, logger, server state and extra-conf store to defaults
  (used by tests)."""
  global _server_state, _logger
  _logger = logging.getLogger( __name__ )
  user_options_store.LoadDefaults()
  _server_state = server_state.ServerState( user_options_store.GetAll() )
  extra_conf_store.Reset()
| tokuhirom/ycmd | ycmd/handlers.py | Python | gpl-3.0 | 7,766 |
# Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Language model"""
from sqlalchemy import Column
from sqlalchemy.types import Unicode, DateTime
from onlinelinguisticdatabase.model.meta import Base, now
class Language(Base):
    # ORM table of language codes; column names mirror the source code
    # table's headers (hence the non-PEP8 capitalized attribute names).
    __tablename__ = 'language'
    def __repr__(self):
        return '<Language (%s)>' % self.Id
    # Three-letter primary identifier.
    Id = Column(Unicode(3), primary_key=True)
    Part2B = Column(Unicode(3))
    Part2T = Column(Unicode(3))
    Part1 = Column(Unicode(2))
    Scope = Column(Unicode(1))
    Type = Column(Unicode(1))
    # Reference name of the language.
    Ref_Name = Column(Unicode(150))
    Comment = Column(Unicode(150))
    # Automatically stamped with the current time on creation.
    datetime_modified = Column(DateTime, default=now)
| jrwdunham/old | onlinelinguisticdatabase/model/language.py | Python | apache-2.0 | 1,192 |
import config
import redis
import json
# Shared Redis client for the balance cache.
r = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT, db=config.REDIS_DB)
# Per-address cache TTL in seconds; config.BTCBAL_CACHE overrides the
# 10-minute default. (getattr replaces a bare `except:` that also hid
# unrelated errors while reading the config.)
expTime = getattr(config, 'BTCBAL_CACHE', 600)
# Thin pass-through wrappers around the module-level redis client `r`;
# callers use these so the client can be swapped without touching call sites.
def rGet(key):
 return r.get(key)
def rSet(key,value):
 return r.set(key,value)
def rExpire(key,sec):
 return r.expire(key,sec)
def rDelete(key):
 return r.delete(key)
def rKeys(key):
 # Pattern-matching key listing (e.g. "prefix:*").
 return r.keys(key)
def rSetNotUpdateBTC(baldata):
 """Cache balances only for the addresses listed in baldata['fresh'],
 leaving already-cached addresses untouched. Each entry expires after
 expTime seconds."""
 fresh = baldata['fresh']
 # Truthiness covers both None and an empty list in a single check
 # (replaces the non-idiomatic `fresh != None and len(fresh) > 0`).
 if fresh:
  for addr in fresh:
   # Hoist the duplicated key expression used for both set and expire.
   key = "omniwallet:balances:address:" + str(addr)
   rSet(key, json.dumps({"bal": baldata['bal'][addr], "error": None}))
   rExpire(key, expTime)
def rExpireAllBalBTC():
 # Full invalidation: delete every cached per-address balance entry.
 for addr in rKeys("omniwallet:balances:address:*"):
  rDelete(addr)
| achamely/omniwallet | api/cacher.py | Python | agpl-3.0 | 786 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import operator
import sys
import uuid
import warnings
from abc import ABCMeta, abstractmethod, abstractproperty
from multiprocessing.pool import ThreadPool
from pyspark import keyword_only, since, SparkContext
from pyspark.ml import Estimator, Predictor, PredictionModel, Model
from pyspark.ml.param.shared import HasRawPredictionCol, HasProbabilityCol, HasThresholds, \
HasRegParam, HasMaxIter, HasFitIntercept, HasTol, HasStandardization, HasWeightCol, \
HasAggregationDepth, HasThreshold, HasBlockSize, HasMaxBlockSizeInMB, Param, Params, \
TypeConverters, HasElasticNetParam, HasSeed, HasStepSize, HasSolver, HasParallelism
from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \
_TreeEnsembleModel, _RandomForestParams, _GBTParams, \
_HasVarianceImpurity, _TreeClassifierParams
from pyspark.ml.regression import _FactorizationMachinesParams, DecisionTreeRegressionModel
from pyspark.ml.base import _PredictorParams
from pyspark.ml.util import DefaultParamsReader, DefaultParamsWriter, \
JavaMLReadable, JavaMLReader, JavaMLWritable, JavaMLWriter, \
MLReader, MLReadable, MLWriter, MLWritable, HasTrainingSummary
from pyspark.ml.wrapper import JavaParams, \
JavaPredictor, JavaPredictionModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.sql import DataFrame
from pyspark.sql.functions import udf, when
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.storagelevel import StorageLevel
# Public API of this module; names outside this list (underscore-prefixed
# mixins and Java wrappers) are implementation details.
__all__ = ['LinearSVC', 'LinearSVCModel',
           'LinearSVCSummary', 'LinearSVCTrainingSummary',
           'LogisticRegression', 'LogisticRegressionModel',
           'LogisticRegressionSummary', 'LogisticRegressionTrainingSummary',
           'BinaryLogisticRegressionSummary', 'BinaryLogisticRegressionTrainingSummary',
           'DecisionTreeClassifier', 'DecisionTreeClassificationModel',
           'GBTClassifier', 'GBTClassificationModel',
           'RandomForestClassifier', 'RandomForestClassificationModel',
           'RandomForestClassificationSummary', 'RandomForestClassificationTrainingSummary',
           'BinaryRandomForestClassificationSummary',
           'BinaryRandomForestClassificationTrainingSummary',
           'NaiveBayes', 'NaiveBayesModel',
           'MultilayerPerceptronClassifier', 'MultilayerPerceptronClassificationModel',
           'MultilayerPerceptronClassificationSummary',
           'MultilayerPerceptronClassificationTrainingSummary',
           'OneVsRest', 'OneVsRestModel',
           'FMClassifier', 'FMClassificationModel', 'FMClassificationSummary',
           'FMClassificationTrainingSummary']
class _ClassifierParams(HasRawPredictionCol, _PredictorParams):
    """
    Classifier Params for classification tasks.
    .. versionadded:: 3.0.0
    """
    # Pure Params mixin: aggregates shared parameters, defines no members.
    pass
@inherit_doc
class Classifier(Predictor, _ClassifierParams, metaclass=ABCMeta):
    """
    Classifier for classification tasks.
    Classes are indexed {0, 1, ..., numClasses - 1}.
    """
    @since("3.0.0")
    def setRawPredictionCol(self, value):
        """
        Sets the value of :py:attr:`rawPredictionCol`.
        """
        return self._set(rawPredictionCol=value)
@inherit_doc
class ClassificationModel(PredictionModel, _ClassifierParams, metaclass=ABCMeta):
    """
    Model produced by a ``Classifier``.
    Classes are indexed {0, 1, ..., numClasses - 1}.
    """
    @since("3.0.0")
    def setRawPredictionCol(self, value):
        """
        Sets the value of :py:attr:`rawPredictionCol`.
        """
        return self._set(rawPredictionCol=value)
    # Abstract members below must be provided by concrete models.
    @abstractproperty
    @since("2.1.0")
    def numClasses(self):
        """
        Number of classes (values which the label can take).
        """
        raise NotImplementedError()
    @abstractmethod
    @since("3.0.0")
    def predictRaw(self, value):
        """
        Raw prediction for each possible label.
        """
        raise NotImplementedError()
class _ProbabilisticClassifierParams(HasProbabilityCol, HasThresholds, _ClassifierParams):
    """
    Params for :py:class:`ProbabilisticClassifier` and
    :py:class:`ProbabilisticClassificationModel`.
    .. versionadded:: 3.0.0
    """
    # Pure Params mixin: aggregates shared parameters, defines no members.
    pass
@inherit_doc
class ProbabilisticClassifier(Classifier, _ProbabilisticClassifierParams,
                              metaclass=ABCMeta):
    """
    Probabilistic Classifier for classification tasks.
    """
    @since("3.0.0")
    def setProbabilityCol(self, value):
        """
        Sets the value of :py:attr:`probabilityCol`.
        """
        return self._set(probabilityCol=value)
    @since("3.0.0")
    def setThresholds(self, value):
        """
        Sets the value of :py:attr:`thresholds`.
        """
        return self._set(thresholds=value)
@inherit_doc
class ProbabilisticClassificationModel(ClassificationModel,
                                       _ProbabilisticClassifierParams,
                                       metaclass=ABCMeta):
    """
    Model produced by a ``ProbabilisticClassifier``.
    """
    @since("3.0.0")
    def setProbabilityCol(self, value):
        """
        Sets the value of :py:attr:`probabilityCol`.
        """
        return self._set(probabilityCol=value)
    @since("3.0.0")
    def setThresholds(self, value):
        """
        Sets the value of :py:attr:`thresholds`.
        """
        return self._set(thresholds=value)
    @abstractmethod
    @since("3.0.0")
    def predictProbability(self, value):
        """
        Predict the probability of each class given the features.
        """
        raise NotImplementedError()
@inherit_doc
class _JavaClassifier(Classifier, JavaPredictor, metaclass=ABCMeta):
    """
    Java Classifier for classification tasks.
    Classes are indexed {0, 1, ..., numClasses - 1}.
    """
    @since("3.0.0")
    def setRawPredictionCol(self, value):
        """
        Sets the value of :py:attr:`rawPredictionCol`.
        """
        return self._set(rawPredictionCol=value)
@inherit_doc
class _JavaClassificationModel(ClassificationModel, JavaPredictionModel):
    """
    Java Model produced by a ``Classifier``.
    Classes are indexed {0, 1, ..., numClasses - 1}.
    To be mixed in with :class:`pyspark.ml.JavaModel`
    """
    # Members below delegate to the wrapped JVM model via _call_java.
    @property
    @since("2.1.0")
    def numClasses(self):
        """
        Number of classes (values which the label can take).
        """
        return self._call_java("numClasses")
    @since("3.0.0")
    def predictRaw(self, value):
        """
        Raw prediction for each possible label.
        """
        return self._call_java("predictRaw", value)
@inherit_doc
class _JavaProbabilisticClassifier(ProbabilisticClassifier, _JavaClassifier,
                                   metaclass=ABCMeta):
    """
    Java Probabilistic Classifier for classification tasks.
    """
    # Marker base class combining the two hierarchies; no members needed.
    pass
@inherit_doc
class _JavaProbabilisticClassificationModel(ProbabilisticClassificationModel,
                                            _JavaClassificationModel):
    """
    Java Model produced by a ``ProbabilisticClassifier``.
    """
    @since("3.0.0")
    def predictProbability(self, value):
        """
        Predict the probability of each class given the features.
        """
        return self._call_java("predictProbability", value)
@inherit_doc
class _ClassificationSummary(JavaWrapper):
    """
    Abstraction for multiclass classification results for a given model.
    .. versionadded:: 3.1.0
    """
    # Every member below is a thin wrapper that delegates to the JVM-side
    # summary object via Py4J (`_call_java`); no metric is computed in Python.
    @property
    @since("3.1.0")
    def predictions(self):
        """
        Dataframe outputted by the model's `transform` method.
        """
        return self._call_java("predictions")
    @property
    @since("3.1.0")
    def predictionCol(self):
        """
        Field in "predictions" which gives the prediction of each class.
        """
        return self._call_java("predictionCol")
    @property
    @since("3.1.0")
    def labelCol(self):
        """
        Field in "predictions" which gives the true label of each
        instance.
        """
        return self._call_java("labelCol")
    @property
    @since("3.1.0")
    def weightCol(self):
        """
        Field in "predictions" which gives the weight of each instance
        as a vector.
        """
        return self._call_java("weightCol")
    @property
    def labels(self):
        """
        Returns the sequence of labels in ascending order. This order matches the order used
        in metrics which are specified as arrays over labels, e.g., truePositiveRateByLabel.
        .. versionadded:: 3.1.0
        Notes
        -----
        In most cases, it will be values {0.0, 1.0, ..., numClasses-1}, However, if the
        training set is missing a label, then all of the arrays over labels
        (e.g., from truePositiveRateByLabel) will be of length numClasses-1 instead of the
        expected numClasses.
        """
        return self._call_java("labels")
    @property
    @since("3.1.0")
    def truePositiveRateByLabel(self):
        """
        Returns true positive rate for each label (category).
        """
        return self._call_java("truePositiveRateByLabel")
    @property
    @since("3.1.0")
    def falsePositiveRateByLabel(self):
        """
        Returns false positive rate for each label (category).
        """
        return self._call_java("falsePositiveRateByLabel")
    @property
    @since("3.1.0")
    def precisionByLabel(self):
        """
        Returns precision for each label (category).
        """
        return self._call_java("precisionByLabel")
    @property
    @since("3.1.0")
    def recallByLabel(self):
        """
        Returns recall for each label (category).
        """
        return self._call_java("recallByLabel")
    @since("3.1.0")
    def fMeasureByLabel(self, beta=1.0):
        """
        Returns f-measure for each label (category).
        """
        return self._call_java("fMeasureByLabel", beta)
    @property
    @since("3.1.0")
    def accuracy(self):
        """
        Returns accuracy.
        (equals to the total number of correctly classified instances
        out of the total number of instances.)
        """
        return self._call_java("accuracy")
    @property
    @since("3.1.0")
    def weightedTruePositiveRate(self):
        """
        Returns weighted true positive rate.
        (equals to precision, recall and f-measure)
        """
        return self._call_java("weightedTruePositiveRate")
    @property
    @since("3.1.0")
    def weightedFalsePositiveRate(self):
        """
        Returns weighted false positive rate.
        """
        return self._call_java("weightedFalsePositiveRate")
    @property
    @since("3.1.0")
    def weightedRecall(self):
        """
        Returns weighted averaged recall.
        (equals to precision, recall and f-measure)
        """
        return self._call_java("weightedRecall")
    @property
    @since("3.1.0")
    def weightedPrecision(self):
        """
        Returns weighted averaged precision.
        """
        return self._call_java("weightedPrecision")
    @since("3.1.0")
    def weightedFMeasure(self, beta=1.0):
        """
        Returns weighted averaged f-measure.
        """
        return self._call_java("weightedFMeasure", beta)
@inherit_doc
class _TrainingSummary(JavaWrapper):
    """
    Abstraction for Training results.

    Mixed into concrete summary classes to expose iteration/objective
    information; accessors delegate to the JVM summary object.

    .. versionadded:: 3.1.0
    """

    @property
    @since("3.1.0")
    def objectiveHistory(self):
        """
        Objective function (scaled loss + regularization) at each
        iteration. It contains one more element, the initial state,
        than number of iterations.
        """
        return self._call_java("objectiveHistory")

    @property
    @since("3.1.0")
    def totalIterations(self):
        """
        Number of training iterations until termination.
        """
        return self._call_java("totalIterations")
@inherit_doc
class _BinaryClassificationSummary(_ClassificationSummary):
    """
    Binary classification results for a given model.

    Extends the multiclass summary with threshold-based curves (ROC,
    precision-recall, etc.); accessors delegate to the JVM summary object.

    .. versionadded:: 3.1.0
    """

    @property
    @since("3.1.0")
    def scoreCol(self):
        """
        Field in "predictions" which gives the probability or raw prediction
        of each class as a vector.
        """
        return self._call_java("scoreCol")

    @property
    def roc(self):
        """
        Returns the receiver operating characteristic (ROC) curve,
        which is a Dataframe having two fields (FPR, TPR) with
        (0.0, 0.0) prepended and (1.0, 1.0) appended to it.

        .. versionadded:: 3.1.0

        Notes
        -----
        `Wikipedia reference <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
        """
        return self._call_java("roc")

    @property
    @since("3.1.0")
    def areaUnderROC(self):
        """
        Computes the area under the receiver operating characteristic
        (ROC) curve.
        """
        return self._call_java("areaUnderROC")

    @property
    @since("3.1.0")
    def pr(self):
        """
        Returns the precision-recall curve, which is a Dataframe
        containing two fields recall, precision with (0.0, 1.0) prepended
        to it.
        """
        return self._call_java("pr")

    @property
    @since("3.1.0")
    def fMeasureByThreshold(self):
        """
        Returns a dataframe with two fields (threshold, F-Measure) curve
        with beta = 1.0.
        """
        return self._call_java("fMeasureByThreshold")

    @property
    @since("3.1.0")
    def precisionByThreshold(self):
        """
        Returns a dataframe with two fields (threshold, precision) curve.
        Every possible probability obtained in transforming the dataset
        are used as thresholds used in calculating the precision.
        """
        return self._call_java("precisionByThreshold")

    @property
    @since("3.1.0")
    def recallByThreshold(self):
        """
        Returns a dataframe with two fields (threshold, recall) curve.
        Every possible probability obtained in transforming the dataset
        are used as thresholds used in calculating the recall.
        """
        return self._call_java("recallByThreshold")
class _LinearSVCParams(_ClassifierParams, HasRegParam, HasMaxIter, HasFitIntercept, HasTol,
                       HasStandardization, HasWeightCol, HasAggregationDepth, HasThreshold,
                       HasMaxBlockSizeInMB):
    """
    Params for :py:class:`LinearSVC` and :py:class:`LinearSVCModel`.

    .. versionadded:: 3.0.0
    """

    # Overrides the inherited HasThreshold param: per the description, this
    # threshold is applied to the linear model prediction and may be any real
    # number (not restricted to [0, 1]).
    threshold = Param(Params._dummy(), "threshold",
                      "The threshold in binary classification applied to the linear model"
                      " prediction. This threshold can be any real number, where Inf will make"
                      " all predictions 0.0 and -Inf will make all predictions 1.0.",
                      typeConverter=TypeConverters.toFloat)

    def __init__(self, *args):
        super(_LinearSVCParams, self).__init__(*args)
        # Default values for all params shared by LinearSVC and its model.
        self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, fitIntercept=True,
                         standardization=True, threshold=0.0, aggregationDepth=2,
                         maxBlockSizeInMB=0.0)
@inherit_doc
class LinearSVC(_JavaClassifier, _LinearSVCParams, JavaMLWritable, JavaMLReadable):
    """
    This binary classifier optimizes the Hinge Loss using the OWLQN optimizer.
    Only supports L2 regularization currently.

    .. versionadded:: 2.2.0

    Notes
    -----
    `Linear SVM Classifier <https://en.wikipedia.org/wiki/Support_vector_machine#Linear_SVM>`_

    Examples
    --------
    >>> from pyspark.sql import Row
    >>> from pyspark.ml.linalg import Vectors
    >>> df = sc.parallelize([
    ...     Row(label=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
    ...     Row(label=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
    >>> svm = LinearSVC()
    >>> svm.getMaxIter()
    100
    >>> svm.setMaxIter(5)
    LinearSVC...
    >>> svm.getMaxIter()
    5
    >>> svm.getRegParam()
    0.0
    >>> svm.setRegParam(0.01)
    LinearSVC...
    >>> svm.getRegParam()
    0.01
    >>> model = svm.fit(df)
    >>> model.setPredictionCol("newPrediction")
    LinearSVCModel...
    >>> model.getPredictionCol()
    'newPrediction'
    >>> model.setThreshold(0.5)
    LinearSVCModel...
    >>> model.getThreshold()
    0.5
    >>> model.getMaxBlockSizeInMB()
    0.0
    >>> model.coefficients
    DenseVector([0.0, -0.2792, -0.1833])
    >>> model.intercept
    1.0206118982229047
    >>> model.numClasses
    2
    >>> model.numFeatures
    3
    >>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, -1.0, -1.0))]).toDF()
    >>> model.predict(test0.head().features)
    1.0
    >>> model.predictRaw(test0.head().features)
    DenseVector([-1.4831, 1.4831])
    >>> result = model.transform(test0).head()
    >>> result.newPrediction
    1.0
    >>> result.rawPrediction
    DenseVector([-1.4831, 1.4831])
    >>> svm_path = temp_path + "/svm"
    >>> svm.save(svm_path)
    >>> svm2 = LinearSVC.load(svm_path)
    >>> svm2.getMaxIter()
    5
    >>> model_path = temp_path + "/svm_model"
    >>> model.save(model_path)
    >>> model2 = LinearSVCModel.load(model_path)
    >>> model.coefficients[0] == model2.coefficients[0]
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
                 fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
                 aggregationDepth=2, maxBlockSizeInMB=0.0):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
                 fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
                 aggregationDepth=2, maxBlockSizeInMB=0.0):
        """
        super(LinearSVC, self).__init__()
        # Create the JVM-side peer estimator that implements the algorithm.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.LinearSVC", self.uid)
        # @keyword_only captured the caller's kwargs in self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.2.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
                  fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
                  aggregationDepth=2, maxBlockSizeInMB=0.0):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
                  fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
                  aggregationDepth=2, maxBlockSizeInMB=0.0):
        Sets params for Linear SVM Classifier.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return LinearSVCModel(java_model)

    # All setters below return `self` to allow method chaining.
    @since("2.2.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("2.2.0")
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)

    @since("2.2.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("2.2.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)

    @since("2.2.0")
    def setStandardization(self, value):
        """
        Sets the value of :py:attr:`standardization`.
        """
        return self._set(standardization=value)

    @since("2.2.0")
    def setThreshold(self, value):
        """
        Sets the value of :py:attr:`threshold`.
        """
        return self._set(threshold=value)

    @since("2.2.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("2.2.0")
    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)

    @since("3.1.0")
    def setMaxBlockSizeInMB(self, value):
        """
        Sets the value of :py:attr:`maxBlockSizeInMB`.
        """
        return self._set(maxBlockSizeInMB=value)
class LinearSVCModel(_JavaClassificationModel, _LinearSVCParams, JavaMLWritable, JavaMLReadable,
                     HasTrainingSummary):
    """
    Model fitted by LinearSVC.

    .. versionadded:: 2.2.0
    """

    @since("3.0.0")
    def setThreshold(self, value):
        """
        Sets the value of :py:attr:`threshold`.
        """
        return self._set(threshold=value)

    @property
    @since("2.2.0")
    def coefficients(self):
        """
        Model coefficients of Linear SVM Classifier.
        """
        return self._call_java("coefficients")

    @property
    @since("2.2.0")
    def intercept(self):
        """
        Model intercept of Linear SVM Classifier.
        """
        return self._call_java("intercept")

    # ``@property`` added for consistency with ``LogisticRegressionModel.summary``
    # and the ``HasTrainingSummary.summary`` property this overrides; as a plain
    # method, ``model.summary`` returned a bound method instead of the summary.
    @property
    @since("3.1.0")
    def summary(self):
        """
        Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
        trained on the training set. An exception is thrown if `trainingSummary is None`.
        """
        if self.hasSummary:
            # Re-wrap the Java summary in the Python-side training summary class.
            return LinearSVCTrainingSummary(super(LinearSVCModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        .. versionadded:: 3.1.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on.
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_lsvc_summary = self._call_java("evaluate", dataset)
        return LinearSVCSummary(java_lsvc_summary)
class LinearSVCSummary(_BinaryClassificationSummary):
    """
    Abstraction for LinearSVC Results for a given model.

    All behavior is inherited from :py:class:`_BinaryClassificationSummary`;
    this subclass only names the LinearSVC-specific summary type.

    .. versionadded:: 3.1.0
    """
@inherit_doc
class LinearSVCTrainingSummary(LinearSVCSummary, _TrainingSummary):
    """
    Abstraction for LinearSVC Training results.

    Combines the evaluation summary with training-time information
    (objective history, iteration count) from :py:class:`_TrainingSummary`.

    .. versionadded:: 3.1.0
    """
class _LogisticRegressionParams(_ProbabilisticClassifierParams, HasRegParam,
                                HasElasticNetParam, HasMaxIter, HasFitIntercept, HasTol,
                                HasStandardization, HasWeightCol, HasAggregationDepth,
                                HasThreshold, HasMaxBlockSizeInMB):
    """
    Params for :py:class:`LogisticRegression` and :py:class:`LogisticRegressionModel`.

    .. versionadded:: 3.0.0
    """

    # Overrides the inherited HasThreshold param to document the interaction
    # with the multi-class `thresholds` param.
    # NOTE: missing space fixed in the description ("must match. e.g." was
    # previously rendered as "must match.e.g.").
    threshold = Param(Params._dummy(), "threshold",
                      "Threshold in binary classification prediction, in range [0, 1]." +
                      " If threshold and thresholds are both set, they must match. " +
                      "e.g. if threshold is p, then thresholds must be equal to [1-p, p].",
                      typeConverter=TypeConverters.toFloat)

    family = Param(Params._dummy(), "family",
                   "The name of family which is a description of the label distribution to " +
                   "be used in the model. Supported options: auto, binomial, multinomial",
                   typeConverter=TypeConverters.toString)

    lowerBoundsOnCoefficients = Param(Params._dummy(), "lowerBoundsOnCoefficients",
                                      "The lower bounds on coefficients if fitting under bound "
                                      "constrained optimization. The bound matrix must be "
                                      "compatible with the shape "
                                      "(1, number of features) for binomial regression, or "
                                      "(number of classes, number of features) "
                                      "for multinomial regression.",
                                      typeConverter=TypeConverters.toMatrix)

    upperBoundsOnCoefficients = Param(Params._dummy(), "upperBoundsOnCoefficients",
                                      "The upper bounds on coefficients if fitting under bound "
                                      "constrained optimization. The bound matrix must be "
                                      "compatible with the shape "
                                      "(1, number of features) for binomial regression, or "
                                      "(number of classes, number of features) "
                                      "for multinomial regression.",
                                      typeConverter=TypeConverters.toMatrix)

    # NOTE: missing spaces and a typo fixed in the description below
    # (previously rendered "size must beequal" and "the number oflasses").
    lowerBoundsOnIntercepts = Param(Params._dummy(), "lowerBoundsOnIntercepts",
                                    "The lower bounds on intercepts if fitting under bound "
                                    "constrained optimization. The bounds vector size must be "
                                    "equal with 1 for binomial regression, or the number of "
                                    "classes for multinomial regression.",
                                    typeConverter=TypeConverters.toVector)

    upperBoundsOnIntercepts = Param(Params._dummy(), "upperBoundsOnIntercepts",
                                    "The upper bounds on intercepts if fitting under bound "
                                    "constrained optimization. The bound vector size must be "
                                    "equal with 1 for binomial regression, or the number of "
                                    "classes for multinomial regression.",
                                    typeConverter=TypeConverters.toVector)

    def __init__(self, *args):
        super(_LogisticRegressionParams, self).__init__(*args)
        # Default values shared by LogisticRegression and its model.
        self._setDefault(maxIter=100, regParam=0.0, tol=1E-6, threshold=0.5, family="auto",
                         maxBlockSizeInMB=0.0)

    @since("1.4.0")
    def setThreshold(self, value):
        """
        Sets the value of :py:attr:`threshold`.
        Clears value of :py:attr:`thresholds` if it has been set.
        """
        self._set(threshold=value)
        self.clear(self.thresholds)
        return self

    @since("1.4.0")
    def getThreshold(self):
        """
        Get threshold for binary classification.

        If :py:attr:`thresholds` is set with length 2 (i.e., binary classification),
        this returns the equivalent threshold:
        :math:`\\frac{1}{1 + \\frac{thresholds(0)}{thresholds(1)}}`.
        Otherwise, returns :py:attr:`threshold` if set or its default value if unset.
        """
        self._checkThresholdConsistency()
        if self.isSet(self.thresholds):
            ts = self.getOrDefault(self.thresholds)
            if len(ts) != 2:
                # map(str, ...) is required here: ts holds floats, and
                # str.join raises TypeError on non-string elements, which
                # previously masked this ValueError with a TypeError.
                raise ValueError("Logistic Regression getThreshold only applies to" +
                                 " binary classification, but thresholds has length != 2." +
                                 " thresholds: " + ",".join(map(str, ts)))
            return 1.0/(1.0 + ts[0]/ts[1])
        else:
            return self.getOrDefault(self.threshold)

    @since("1.5.0")
    def setThresholds(self, value):
        """
        Sets the value of :py:attr:`thresholds`.
        Clears value of :py:attr:`threshold` if it has been set.
        """
        self._set(thresholds=value)
        self.clear(self.threshold)
        return self

    @since("1.5.0")
    def getThresholds(self):
        """
        If :py:attr:`thresholds` is set, return its value.
        Otherwise, if :py:attr:`threshold` is set, return the equivalent thresholds for binary
        classification: (1-threshold, threshold).
        If neither are set, throw an error.
        """
        self._checkThresholdConsistency()
        if not self.isSet(self.thresholds) and self.isSet(self.threshold):
            t = self.getOrDefault(self.threshold)
            return [1.0-t, t]
        else:
            return self.getOrDefault(self.thresholds)

    def _checkThresholdConsistency(self):
        # Raise ValueError if `threshold` and `thresholds` are both set but
        # describe different cutoffs (allowing for float tolerance).
        if self.isSet(self.threshold) and self.isSet(self.thresholds):
            ts = self.getOrDefault(self.thresholds)
            if len(ts) != 2:
                raise ValueError("Logistic Regression getThreshold only applies to" +
                                 " binary classification, but thresholds has length != 2." +
                                 " thresholds: {0}".format(str(ts)))
            t = 1.0/(1.0 + ts[0]/ts[1])
            t2 = self.getOrDefault(self.threshold)
            if abs(t2 - t) >= 1E-5:
                raise ValueError("Logistic Regression getThreshold found inconsistent values for" +
                                 " threshold (%g) and thresholds (equivalent to %g)" % (t2, t))

    @since("2.1.0")
    def getFamily(self):
        """
        Gets the value of :py:attr:`family` or its default value.
        """
        return self.getOrDefault(self.family)

    @since("2.3.0")
    def getLowerBoundsOnCoefficients(self):
        """
        Gets the value of :py:attr:`lowerBoundsOnCoefficients`
        """
        return self.getOrDefault(self.lowerBoundsOnCoefficients)

    @since("2.3.0")
    def getUpperBoundsOnCoefficients(self):
        """
        Gets the value of :py:attr:`upperBoundsOnCoefficients`
        """
        return self.getOrDefault(self.upperBoundsOnCoefficients)

    @since("2.3.0")
    def getLowerBoundsOnIntercepts(self):
        """
        Gets the value of :py:attr:`lowerBoundsOnIntercepts`
        """
        return self.getOrDefault(self.lowerBoundsOnIntercepts)

    @since("2.3.0")
    def getUpperBoundsOnIntercepts(self):
        """
        Gets the value of :py:attr:`upperBoundsOnIntercepts`
        """
        return self.getOrDefault(self.upperBoundsOnIntercepts)
@inherit_doc
class LogisticRegression(_JavaProbabilisticClassifier, _LogisticRegressionParams, JavaMLWritable,
                         JavaMLReadable):
    """
    Logistic regression.
    This class supports multinomial logistic (softmax) and binomial logistic regression.

    .. versionadded:: 1.3.0

    Examples
    --------
    >>> from pyspark.sql import Row
    >>> from pyspark.ml.linalg import Vectors
    >>> bdf = sc.parallelize([
    ...     Row(label=1.0, weight=1.0, features=Vectors.dense(0.0, 5.0)),
    ...     Row(label=0.0, weight=2.0, features=Vectors.dense(1.0, 2.0)),
    ...     Row(label=1.0, weight=3.0, features=Vectors.dense(2.0, 1.0)),
    ...     Row(label=0.0, weight=4.0, features=Vectors.dense(3.0, 3.0))]).toDF()
    >>> blor = LogisticRegression(weightCol="weight")
    >>> blor.getRegParam()
    0.0
    >>> blor.setRegParam(0.01)
    LogisticRegression...
    >>> blor.getRegParam()
    0.01
    >>> blor.setMaxIter(10)
    LogisticRegression...
    >>> blor.getMaxIter()
    10
    >>> blor.clear(blor.maxIter)
    >>> blorModel = blor.fit(bdf)
    >>> blorModel.setFeaturesCol("features")
    LogisticRegressionModel...
    >>> blorModel.setProbabilityCol("newProbability")
    LogisticRegressionModel...
    >>> blorModel.getProbabilityCol()
    'newProbability'
    >>> blorModel.getMaxBlockSizeInMB()
    0.0
    >>> blorModel.setThreshold(0.1)
    LogisticRegressionModel...
    >>> blorModel.getThreshold()
    0.1
    >>> blorModel.coefficients
    DenseVector([-1.080..., -0.646...])
    >>> blorModel.intercept
    3.112...
    >>> blorModel.evaluate(bdf).accuracy == blorModel.summary.accuracy
    True
    >>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
    >>> mdf = spark.read.format("libsvm").load(data_path)
    >>> mlor = LogisticRegression(regParam=0.1, elasticNetParam=1.0, family="multinomial")
    >>> mlorModel = mlor.fit(mdf)
    >>> mlorModel.coefficientMatrix
    SparseMatrix(3, 4, [0, 1, 2, 3], [3, 2, 1], [1.87..., -2.75..., -0.50...], 1)
    >>> mlorModel.interceptVector
    DenseVector([0.04..., -0.42..., 0.37...])
    >>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 1.0))]).toDF()
    >>> blorModel.predict(test0.head().features)
    1.0
    >>> blorModel.predictRaw(test0.head().features)
    DenseVector([-3.54..., 3.54...])
    >>> blorModel.predictProbability(test0.head().features)
    DenseVector([0.028, 0.972])
    >>> result = blorModel.transform(test0).head()
    >>> result.prediction
    1.0
    >>> result.newProbability
    DenseVector([0.02..., 0.97...])
    >>> result.rawPrediction
    DenseVector([-3.54..., 3.54...])
    >>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
    >>> blorModel.transform(test1).head().prediction
    1.0
    >>> blor.setParams("vector")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    >>> lr_path = temp_path + "/lr"
    >>> blor.save(lr_path)
    >>> lr2 = LogisticRegression.load(lr_path)
    >>> lr2.getRegParam()
    0.01
    >>> model_path = temp_path + "/lr_model"
    >>> blorModel.save(model_path)
    >>> model2 = LogisticRegressionModel.load(model_path)
    >>> blorModel.coefficients[0] == model2.coefficients[0]
    True
    >>> blorModel.intercept == model2.intercept
    True
    >>> model2
    LogisticRegressionModel: uid=..., numClasses=2, numFeatures=2
    >>> blorModel.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                 threshold=0.5, thresholds=None, probabilityCol="probability",
                 rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
                 aggregationDepth=2, family="auto",
                 lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None,
                 lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None,
                 maxBlockSizeInMB=0.0):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                 threshold=0.5, thresholds=None, probabilityCol="probability", \
                 rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
                 aggregationDepth=2, family="auto", \
                 lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None, \
                 lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None, \
                 maxBlockSizeInMB=0.0):
        If the threshold and thresholds Params are both set, they must be equivalent.
        """
        super(LogisticRegression, self).__init__()
        # Create the JVM-side peer estimator that implements the algorithm.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.LogisticRegression", self.uid)
        # @keyword_only captured the caller's kwargs in self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
        # Fail fast if threshold and thresholds disagree.
        self._checkThresholdConsistency()

    @keyword_only
    @since("1.3.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                  threshold=0.5, thresholds=None, probabilityCol="probability",
                  rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
                  aggregationDepth=2, family="auto",
                  lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None,
                  lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None,
                  maxBlockSizeInMB=0.0):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                  threshold=0.5, thresholds=None, probabilityCol="probability", \
                  rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
                  aggregationDepth=2, family="auto", \
                  lowerBoundsOnCoefficients=None, upperBoundsOnCoefficients=None, \
                  lowerBoundsOnIntercepts=None, upperBoundsOnIntercepts=None, \
                  maxBlockSizeInMB=0.0):
        Sets params for logistic regression.
        If the threshold and thresholds Params are both set, they must be equivalent.
        """
        kwargs = self._input_kwargs
        self._set(**kwargs)
        self._checkThresholdConsistency()
        return self

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return LogisticRegressionModel(java_model)

    @since("2.1.0")
    def setFamily(self, value):
        """
        Sets the value of :py:attr:`family`.
        """
        return self._set(family=value)

    @since("2.3.0")
    def setLowerBoundsOnCoefficients(self, value):
        """
        Sets the value of :py:attr:`lowerBoundsOnCoefficients`
        """
        return self._set(lowerBoundsOnCoefficients=value)

    @since("2.3.0")
    def setUpperBoundsOnCoefficients(self, value):
        """
        Sets the value of :py:attr:`upperBoundsOnCoefficients`
        """
        return self._set(upperBoundsOnCoefficients=value)

    @since("2.3.0")
    def setLowerBoundsOnIntercepts(self, value):
        """
        Sets the value of :py:attr:`lowerBoundsOnIntercepts`
        """
        return self._set(lowerBoundsOnIntercepts=value)

    @since("2.3.0")
    def setUpperBoundsOnIntercepts(self, value):
        """
        Sets the value of :py:attr:`upperBoundsOnIntercepts`
        """
        return self._set(upperBoundsOnIntercepts=value)

    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)

    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    def setElasticNetParam(self, value):
        """
        Sets the value of :py:attr:`elasticNetParam`.
        """
        return self._set(elasticNetParam=value)

    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)

    def setStandardization(self, value):
        """
        Sets the value of :py:attr:`standardization`.
        """
        return self._set(standardization=value)

    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    def setAggregationDepth(self, value):
        """
        Sets the value of :py:attr:`aggregationDepth`.
        """
        return self._set(aggregationDepth=value)

    @since("3.1.0")
    def setMaxBlockSizeInMB(self, value):
        """
        Sets the value of :py:attr:`maxBlockSizeInMB`.
        """
        return self._set(maxBlockSizeInMB=value)
class LogisticRegressionModel(_JavaProbabilisticClassificationModel, _LogisticRegressionParams,
                              JavaMLWritable, JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by LogisticRegression.

    .. versionadded:: 1.3.0
    """

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients of binomial logistic regression.
        An exception is thrown in the case of multinomial logistic regression.
        """
        return self._call_java("coefficients")

    @property
    @since("1.4.0")
    def intercept(self):
        """
        Model intercept of binomial logistic regression.
        An exception is thrown in the case of multinomial logistic regression.
        """
        return self._call_java("intercept")

    @property
    @since("2.1.0")
    def coefficientMatrix(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficientMatrix")

    @property
    @since("2.1.0")
    def interceptVector(self):
        """
        Model intercept.
        """
        return self._call_java("interceptVector")

    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
        trained on the training set. An exception is thrown if `trainingSummary is None`.
        """
        if self.hasSummary:
            # Choose the binary-specific wrapper when the model is binary.
            if self.numClasses <= 2:
                return BinaryLogisticRegressionTrainingSummary(super(LogisticRegressionModel,
                                                                     self).summary)
            else:
                return LogisticRegressionTrainingSummary(super(LogisticRegressionModel,
                                                               self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        .. versionadded:: 2.0.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on.
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_blr_summary = self._call_java("evaluate", dataset)
        # Binary models get the richer binary summary (ROC, PR curves, ...).
        if self.numClasses <= 2:
            return BinaryLogisticRegressionSummary(java_blr_summary)
        else:
            return LogisticRegressionSummary(java_blr_summary)
class LogisticRegressionSummary(_ClassificationSummary):
    """
    Abstraction for Logistic Regression Results for a given model.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def probabilityCol(self):
        """
        Field in "predictions" which gives the probability
        of each class as a vector.
        """
        return self._call_java("probabilityCol")

    @property
    @since("2.0.0")
    def featuresCol(self):
        """
        Field in "predictions" which gives the features of each instance
        as a vector.
        """
        return self._call_java("featuresCol")
@inherit_doc
class LogisticRegressionTrainingSummary(LogisticRegressionSummary, _TrainingSummary):
    """
    Abstraction for multinomial Logistic Regression Training results.

    Combines the evaluation summary with training-time information
    (objective history, iteration count) from :py:class:`_TrainingSummary`.

    .. versionadded:: 2.0.0
    """
@inherit_doc
class BinaryLogisticRegressionSummary(_BinaryClassificationSummary,
                                      LogisticRegressionSummary):
    """
    Binary Logistic regression results for a given model.

    Inherits threshold-based curves (ROC, PR, ...) from
    :py:class:`_BinaryClassificationSummary` plus the logistic-regression
    column accessors; adds no behavior of its own.

    .. versionadded:: 2.0.0
    """
@inherit_doc
class BinaryLogisticRegressionTrainingSummary(BinaryLogisticRegressionSummary,
                                              LogisticRegressionTrainingSummary):
    """
    Binary Logistic regression training results for a given model.

    Marker subclass combining the binary evaluation summary with
    training-time information; adds no behavior of its own.

    .. versionadded:: 2.0.0
    """
@inherit_doc
class _DecisionTreeClassifierParams(_DecisionTreeParams, _TreeClassifierParams):
    """
    Params for :py:class:`DecisionTreeClassifier` and :py:class:`DecisionTreeClassificationModel`.
    """

    def __init__(self, *args):
        super(_DecisionTreeClassifierParams, self).__init__(*args)
        # Default values shared by the estimator and the fitted model.
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="gini", leafCol="", minWeightFractionPerNode=0.0)
@inherit_doc
class DecisionTreeClassifier(_JavaProbabilisticClassifier, _DecisionTreeClassifierParams,
JavaMLWritable, JavaMLReadable):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
.. versionadded:: 1.4.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> dt = DecisionTreeClassifier(maxDepth=2, labelCol="indexed", leafCol="leafId")
>>> model = dt.fit(td)
>>> model.getLabelCol()
'indexed'
>>> model.setFeaturesCol("features")
DecisionTreeClassificationModel...
>>> model.numNodes
3
>>> model.depth
1
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> model.numClasses
2
>>> print(model.toDebugString)
DecisionTreeClassificationModel...depth=1, numNodes=3...
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.predict(test0.head().features)
0.0
>>> model.predictRaw(test0.head().features)
DenseVector([1.0, 0.0])
>>> model.predictProbability(test0.head().features)
DenseVector([1.0, 0.0])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.probability
DenseVector([1.0, 0.0])
>>> result.rawPrediction
DenseVector([1.0, 0.0])
>>> result.leafId
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtc_path = temp_path + "/dtc"
>>> dt.save(dtc_path)
>>> dt2 = DecisionTreeClassifier.load(dtc_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtc_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.transform(test0).take(1) == model2.transform(test0).take(1)
True
>>> df3 = spark.createDataFrame([
... (1.0, 0.2, Vectors.dense(1.0)),
... (1.0, 0.8, Vectors.dense(1.0)),
... (0.0, 1.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
>>> si3 = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model3 = si3.fit(df3)
>>> td3 = si_model3.transform(df3)
>>> dt3 = DecisionTreeClassifier(maxDepth=2, weightCol="weight", labelCol="indexed")
>>> model3 = dt3.fit(td3)
>>> print(model3.toDebugString)
DecisionTreeClassificationModel...depth=1, numNodes=3...
"""
@keyword_only
def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0):
"""
__init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0)
"""
super(DecisionTreeClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.DecisionTreeClassifier", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", seed=None, weightCol=None, leafCol="",
minWeightFractionPerNode=0.0):
"""
setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None, weightCol=None, leafCol="", minWeightFractionPerNode=0.0)
Sets params for the DecisionTreeClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
    # Wrap the fitted JVM model returned by fit() in its Python counterpart.
    return DecisionTreeClassificationModel(java_model)
def setMaxDepth(self, value):
    """
    Sets the value of :py:attr:`maxDepth`.
    """
    # _set returns self, so setter calls can be chained fluently.
    return self._set(maxDepth=value)
def setMaxBins(self, value):
    """
    Sets the value of :py:attr:`maxBins`.
    """
    # Returns self to allow fluent chaining of setters.
    return self._set(maxBins=value)
def setMinInstancesPerNode(self, value):
    """
    Sets the value of :py:attr:`minInstancesPerNode`.
    """
    # Returns self to allow fluent chaining of setters.
    return self._set(minInstancesPerNode=value)
@since("3.0.0")
def setMinWeightFractionPerNode(self, value):
    """
    Sets the value of :py:attr:`minWeightFractionPerNode`.
    """
    # Returns self to allow fluent chaining of setters.
    return self._set(minWeightFractionPerNode=value)
def setMinInfoGain(self, value):
    """
    Sets the value of :py:attr:`minInfoGain`.
    """
    # Returns self to allow fluent chaining of setters.
    return self._set(minInfoGain=value)
def setMaxMemoryInMB(self, value):
    """
    Sets the value of :py:attr:`maxMemoryInMB`.
    """
    # Returns self to allow fluent chaining of setters.
    return self._set(maxMemoryInMB=value)
def setCacheNodeIds(self, value):
    """
    Sets the value of :py:attr:`cacheNodeIds`.
    """
    # Returns self to allow fluent chaining of setters.
    return self._set(cacheNodeIds=value)
@since("1.4.0")
def setImpurity(self, value):
    """
    Sets the value of :py:attr:`impurity`.
    """
    # Returns self to allow fluent chaining of setters.
    return self._set(impurity=value)
@since("1.4.0")
def setCheckpointInterval(self, value):
    """
    Sets the value of :py:attr:`checkpointInterval`.
    """
    # Returns self to allow fluent chaining of setters.
    return self._set(checkpointInterval=value)
def setSeed(self, value):
    """
    Sets the value of :py:attr:`seed`.
    """
    # Returns self to allow fluent chaining of setters.
    return self._set(seed=value)
@since("3.0.0")
def setWeightCol(self, value):
    """
    Sets the value of :py:attr:`weightCol`.
    """
    # Returns self to allow fluent chaining of setters.
    return self._set(weightCol=value)
@inherit_doc
class DecisionTreeClassificationModel(_DecisionTreeModel, _JavaProbabilisticClassificationModel,
                                      _DecisionTreeClassifierParams, JavaMLWritable,
                                      JavaMLReadable):
    """
    Model fitted by DecisionTreeClassifier.

    .. versionadded:: 1.4.0
    """

    @property
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        This generalizes the idea of "Gini" importance to other losses,
        following the explanation of Gini importance from "Random Forests" documentation
        by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.

        This feature importance is calculated as follows:
          - importance(feature j) = sum (over nodes which split on feature j) of the gain,
            where gain is scaled by the number of instances passing through node
          - Normalize importances for tree to sum to 1.

        .. versionadded:: 2.0.0

        Notes
        -----
        Feature importance for single decision trees can have high variance due to
        correlated predictor variables. Consider using a :py:class:`RandomForestClassifier`
        to determine feature importance instead.
        """
        # Importances are computed on the JVM side and returned as-is.
        return self._call_java("featureImportances")
@inherit_doc
class _RandomForestClassifierParams(_RandomForestParams, _TreeClassifierParams):
    """
    Params for :py:class:`RandomForestClassifier` and :py:class:`RandomForestClassificationModel`.
    """

    def __init__(self, *args):
        super(_RandomForestClassifierParams, self).__init__(*args)
        # Defaults mirror the Scala-side RandomForestClassifier defaults so the
        # Python and JVM representations stay in sync.
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="gini", numTrees=20, featureSubsetStrategy="auto",
                         subsamplingRate=1.0, leafCol="", minWeightFractionPerNode=0.0,
                         bootstrap=True)
@inherit_doc
class RandomForestClassifier(_JavaProbabilisticClassifier, _RandomForestClassifierParams,
                             JavaMLWritable, JavaMLReadable):
    """
    `Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
    learning algorithm for classification.
    It supports both binary and multiclass labels, as well as both continuous and categorical
    features.

    .. versionadded:: 1.4.0

    Examples
    --------
    >>> import numpy
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.feature import StringIndexer
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
    >>> si_model = stringIndexer.fit(df)
    >>> td = si_model.transform(df)
    >>> rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="indexed", seed=42,
    ...     leafCol="leafId")
    >>> rf.getMinWeightFractionPerNode()
    0.0
    >>> model = rf.fit(td)
    >>> model.getLabelCol()
    'indexed'
    >>> model.setFeaturesCol("features")
    RandomForestClassificationModel...
    >>> model.setRawPredictionCol("newRawPrediction")
    RandomForestClassificationModel...
    >>> model.getBootstrap()
    True
    >>> model.getRawPredictionCol()
    'newRawPrediction'
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> allclose(model.treeWeights, [1.0, 1.0, 1.0])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.predict(test0.head().features)
    0.0
    >>> model.predictRaw(test0.head().features)
    DenseVector([2.0, 0.0])
    >>> model.predictProbability(test0.head().features)
    DenseVector([1.0, 0.0])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> numpy.argmax(result.probability)
    0
    >>> numpy.argmax(result.newRawPrediction)
    0
    >>> result.leafId
    DenseVector([0.0, 0.0, 0.0])
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> model.trees
    [DecisionTreeClassificationModel...depth=..., DecisionTreeClassificationModel...]
    >>> rfc_path = temp_path + "/rfc"
    >>> rf.save(rfc_path)
    >>> rf2 = RandomForestClassifier.load(rfc_path)
    >>> rf2.getNumTrees()
    3
    >>> model_path = temp_path + "/rfc_model"
    >>> model.save(model_path)
    >>> model2 = RandomForestClassificationModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 probabilityCol="probability", rawPredictionCol="rawPrediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
                 numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0,
                 leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 probabilityCol="probability", rawPredictionCol="rawPrediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
                 numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0, \
                 leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True)
        """
        super(RandomForestClassifier, self).__init__()
        # Create the JVM-side estimator peer; parameters are applied via setParams.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.RandomForestClassifier", self.uid)
        # _input_kwargs is injected by @keyword_only with the caller-supplied kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  probabilityCol="probability", rawPredictionCol="rawPrediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None,
                  impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0,
                  leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  probabilityCol="probability", rawPredictionCol="rawPrediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None, \
                  impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0, \
                  leafCol="", minWeightFractionPerNode=0.0, weightCol=None, bootstrap=True)
        Sets params for the RandomForestClassifier.
        """
        # Only explicitly supplied keyword arguments (captured in _input_kwargs) are set.
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model returned by fit() in its Python counterpart.
        return RandomForestClassificationModel(java_model)

    def setMaxDepth(self, value):
        """
        Sets the value of :py:attr:`maxDepth`.
        """
        return self._set(maxDepth=value)

    def setMaxBins(self, value):
        """
        Sets the value of :py:attr:`maxBins`.
        """
        return self._set(maxBins=value)

    def setMinInstancesPerNode(self, value):
        """
        Sets the value of :py:attr:`minInstancesPerNode`.
        """
        return self._set(minInstancesPerNode=value)

    def setMinInfoGain(self, value):
        """
        Sets the value of :py:attr:`minInfoGain`.
        """
        return self._set(minInfoGain=value)

    def setMaxMemoryInMB(self, value):
        """
        Sets the value of :py:attr:`maxMemoryInMB`.
        """
        return self._set(maxMemoryInMB=value)

    def setCacheNodeIds(self, value):
        """
        Sets the value of :py:attr:`cacheNodeIds`.
        """
        return self._set(cacheNodeIds=value)

    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)

    @since("1.4.0")
    def setNumTrees(self, value):
        """
        Sets the value of :py:attr:`numTrees`.
        """
        return self._set(numTrees=value)

    @since("3.0.0")
    def setBootstrap(self, value):
        """
        Sets the value of :py:attr:`bootstrap`.
        """
        return self._set(bootstrap=value)

    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)

    @since("2.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)

    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)

    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("3.0.0")
    def setMinWeightFractionPerNode(self, value):
        """
        Sets the value of :py:attr:`minWeightFractionPerNode`.
        """
        return self._set(minWeightFractionPerNode=value)
class RandomForestClassificationModel(_TreeEnsembleModel, _JavaProbabilisticClassificationModel,
                                      _RandomForestClassifierParams, JavaMLWritable,
                                      JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by RandomForestClassifier.

    .. versionadded:: 1.4.0
    """

    @property
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.

        .. versionadded:: 2.0.0

        See Also
        --------
        DecisionTreeClassificationModel.featureImportances
        """
        # Importances are computed on the JVM side and returned as-is.
        return self._call_java("featureImportances")

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Wrap each JVM tree model in a Python DecisionTreeClassificationModel.
        return [DecisionTreeClassificationModel(m) for m in list(self._call_java("trees"))]

    @property
    @since("3.1.0")
    def summary(self):
        """
        Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
        trained on the training set. An exception is thrown if `trainingSummary is None`.
        """
        if self.hasSummary:
            # Binary problems get the richer binary summary wrapper.
            if self.numClasses <= 2:
                return BinaryRandomForestClassificationTrainingSummary(
                    super(RandomForestClassificationModel, self).summary)
            else:
                return RandomForestClassificationTrainingSummary(
                    super(RandomForestClassificationModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        .. versionadded:: 3.1.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on.
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_rf_summary = self._call_java("evaluate", dataset)
        # Choose the summary wrapper matching the number of classes, as in `summary`.
        if self.numClasses <= 2:
            return BinaryRandomForestClassificationSummary(java_rf_summary)
        else:
            return RandomForestClassificationSummary(java_rf_summary)
class RandomForestClassificationSummary(_ClassificationSummary):
    """
    Abstraction for RandomForestClassification Results for a given model.

    All functionality is inherited from :py:class:`_ClassificationSummary`;
    this subclass only gives the summary a concrete public name.

    .. versionadded:: 3.1.0
    """
    # No `pass` needed: the docstring already forms the class body.
@inherit_doc
class RandomForestClassificationTrainingSummary(RandomForestClassificationSummary,
                                                _TrainingSummary):
    """
    Abstraction for RandomForestClassification training results.

    Combines the prediction-quality metrics of
    :py:class:`RandomForestClassificationSummary` with the training-time
    information provided by :py:class:`_TrainingSummary`.

    .. versionadded:: 3.1.0
    """
    # No `pass` needed: the docstring already forms the class body.
@inherit_doc
class BinaryRandomForestClassificationSummary(_BinaryClassificationSummary):
    """
    BinaryRandomForestClassification results for a given model.

    All functionality is inherited from :py:class:`_BinaryClassificationSummary`;
    this subclass only gives the summary a concrete public name.

    .. versionadded:: 3.1.0
    """
    # No `pass` needed: the docstring already forms the class body.
@inherit_doc
class BinaryRandomForestClassificationTrainingSummary(BinaryRandomForestClassificationSummary,
                                                      RandomForestClassificationTrainingSummary):
    """
    BinaryRandomForestClassification training results for a given model.

    Combines the binary metrics of
    :py:class:`BinaryRandomForestClassificationSummary` with the training-time
    information of :py:class:`RandomForestClassificationTrainingSummary`.

    .. versionadded:: 3.1.0
    """
    # No `pass` needed: the docstring already forms the class body.
class _GBTClassifierParams(_GBTParams, _HasVarianceImpurity):
    """
    Params for :py:class:`GBTClassifier` and :py:class:`GBTClassifierModel`.

    .. versionadded:: 3.0.0
    """

    # Loss functions GBT classification supports; currently only logistic loss.
    supportedLossTypes = ["logistic"]

    lossType = Param(Params._dummy(), "lossType",
                     "Loss function which GBT tries to minimize (case-insensitive). " +
                     "Supported options: " + ", ".join(supportedLossTypes),
                     typeConverter=TypeConverters.toString)

    def __init__(self, *args):
        super(_GBTClassifierParams, self).__init__(*args)
        # Defaults mirror the Scala-side GBTClassifier defaults.
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         lossType="logistic", maxIter=20, stepSize=0.1, subsamplingRate=1.0,
                         impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
                         leafCol="", minWeightFractionPerNode=0.0)

    @since("1.4.0")
    def getLossType(self):
        """
        Gets the value of lossType or its default value.
        """
        return self.getOrDefault(self.lossType)
@inherit_doc
class GBTClassifier(_JavaProbabilisticClassifier, _GBTClassifierParams,
                    JavaMLWritable, JavaMLReadable):
    """
    `Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
    learning algorithm for classification.
    It supports binary labels, as well as both continuous and categorical features.

    .. versionadded:: 1.4.0

    Notes
    -----
    Multiclass labels are not currently supported.

    The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999.

    Gradient Boosting vs. TreeBoost:

    - This implementation is for Stochastic Gradient Boosting, not for TreeBoost.
    - Both algorithms learn tree ensembles by minimizing loss functions.
    - TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes
      based on the loss function, whereas the original gradient boosting method does not.
    - We expect to implement TreeBoost in the future:
      `SPARK-4240 <https://issues.apache.org/jira/browse/SPARK-4240>`_

    Examples
    --------
    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.feature import StringIndexer
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
    >>> si_model = stringIndexer.fit(df)
    >>> td = si_model.transform(df)
    >>> gbt = GBTClassifier(maxIter=5, maxDepth=2, labelCol="indexed", seed=42,
    ...     leafCol="leafId")
    >>> gbt.setMaxIter(5)
    GBTClassifier...
    >>> gbt.setMinWeightFractionPerNode(0.049)
    GBTClassifier...
    >>> gbt.getMaxIter()
    5
    >>> gbt.getFeatureSubsetStrategy()
    'all'
    >>> model = gbt.fit(td)
    >>> model.getLabelCol()
    'indexed'
    >>> model.setFeaturesCol("features")
    GBTClassificationModel...
    >>> model.setThresholds([0.3, 0.7])
    GBTClassificationModel...
    >>> model.getThresholds()
    [0.3, 0.7]
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.predict(test0.head().features)
    0.0
    >>> model.predictRaw(test0.head().features)
    DenseVector([1.1697, -1.1697])
    >>> model.predictProbability(test0.head().features)
    DenseVector([0.9121, 0.0879])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    0.0
    >>> result.leafId
    DenseVector([0.0, 0.0, 0.0, 0.0, 0.0])
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> model.totalNumNodes
    15
    >>> print(model.toDebugString)
    GBTClassificationModel...numTrees=5...
    >>> gbtc_path = temp_path + "gbtc"
    >>> gbt.save(gbtc_path)
    >>> gbt2 = GBTClassifier.load(gbtc_path)
    >>> gbt2.getMaxDepth()
    2
    >>> model_path = temp_path + "gbtc_model"
    >>> model.save(model_path)
    >>> model2 = GBTClassificationModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.treeWeights == model2.treeWeights
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    >>> model.trees
    [DecisionTreeRegressionModel...depth=..., DecisionTreeRegressionModel...]
    >>> validation = spark.createDataFrame([(0.0, Vectors.dense(-1.0),)],
    ...     ["indexed", "features"])
    >>> model.evaluateEachIteration(validation)
    [0.25..., 0.23..., 0.21..., 0.19..., 0.18...]
    >>> model.numClasses
    2
    >>> gbt = gbt.setValidationIndicatorCol("validationIndicator")
    >>> gbt.getValidationIndicatorCol()
    'validationIndicator'
    >>> gbt.getValidationTol()
    0.01
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic",
                 maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, impurity="variance",
                 featureSubsetStrategy="all", validationTol=0.01, validationIndicatorCol=None,
                 leafCol="", minWeightFractionPerNode=0.0, weightCol=None):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, \
                 impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
                 validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
                 weightCol=None)
        """
        super(GBTClassifier, self).__init__()
        # Create the JVM-side estimator peer; parameters are applied via setParams.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.GBTClassifier", self.uid)
        # _input_kwargs is injected by @keyword_only with the caller-supplied kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0,
                  impurity="variance", featureSubsetStrategy="all", validationTol=0.01,
                  validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0,
                  weightCol=None):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0, \
                  impurity="variance", featureSubsetStrategy="all", validationTol=0.01, \
                  validationIndicatorCol=None, leafCol="", minWeightFractionPerNode=0.0, \
                  weightCol=None)
        Sets params for Gradient Boosted Tree Classification.
        """
        # Only explicitly supplied keyword arguments (captured in _input_kwargs) are set.
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model returned by fit() in its Python counterpart.
        return GBTClassificationModel(java_model)

    def setMaxDepth(self, value):
        """
        Sets the value of :py:attr:`maxDepth`.
        """
        return self._set(maxDepth=value)

    def setMaxBins(self, value):
        """
        Sets the value of :py:attr:`maxBins`.
        """
        return self._set(maxBins=value)

    def setMinInstancesPerNode(self, value):
        """
        Sets the value of :py:attr:`minInstancesPerNode`.
        """
        return self._set(minInstancesPerNode=value)

    def setMinInfoGain(self, value):
        """
        Sets the value of :py:attr:`minInfoGain`.
        """
        return self._set(minInfoGain=value)

    def setMaxMemoryInMB(self, value):
        """
        Sets the value of :py:attr:`maxMemoryInMB`.
        """
        return self._set(maxMemoryInMB=value)

    def setCacheNodeIds(self, value):
        """
        Sets the value of :py:attr:`cacheNodeIds`.
        """
        return self._set(cacheNodeIds=value)

    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)

    @since("1.4.0")
    def setLossType(self, value):
        """
        Sets the value of :py:attr:`lossType`.
        """
        return self._set(lossType=value)

    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)

    @since("2.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)

    @since("3.0.0")
    def setValidationIndicatorCol(self, value):
        """
        Sets the value of :py:attr:`validationIndicatorCol`.
        """
        return self._set(validationIndicatorCol=value)

    @since("1.4.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("1.4.0")
    def setCheckpointInterval(self, value):
        """
        Sets the value of :py:attr:`checkpointInterval`.
        """
        return self._set(checkpointInterval=value)

    @since("1.4.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("1.4.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)

    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    @since("3.0.0")
    def setMinWeightFractionPerNode(self, value):
        """
        Sets the value of :py:attr:`minWeightFractionPerNode`.
        """
        return self._set(minWeightFractionPerNode=value)
class GBTClassificationModel(_TreeEnsembleModel, _JavaProbabilisticClassificationModel,
                             _GBTClassifierParams, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by GBTClassifier.

    .. versionadded:: 1.4.0
    """

    @property
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.

        .. versionadded:: 2.0.0

        See Also
        --------
        DecisionTreeClassificationModel.featureImportances
        """
        # Importances are computed on the JVM side and returned as-is.
        return self._call_java("featureImportances")

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # GBT base learners are regression trees (they fit pseudo-residuals),
        # hence the DecisionTreeRegressionModel wrapper here.
        return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]

    def evaluateEachIteration(self, dataset):
        """
        Method to compute error or loss for every iteration of gradient boosting.

        .. versionadded:: 2.4.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on.
        """
        return self._call_java("evaluateEachIteration", dataset)
class _NaiveBayesParams(_PredictorParams, HasWeightCol):
    """
    Params for :py:class:`NaiveBayes` and :py:class:`NaiveBayesModel`.

    .. versionadded:: 3.0.0
    """

    smoothing = Param(Params._dummy(), "smoothing", "The smoothing parameter, should be >= 0, " +
                      "default is 1.0", typeConverter=TypeConverters.toFloat)
    modelType = Param(Params._dummy(), "modelType", "The model type which is a string " +
                      "(case-sensitive). Supported options: multinomial (default), bernoulli " +
                      "and gaussian.",
                      typeConverter=TypeConverters.toString)

    def __init__(self, *args):
        super(_NaiveBayesParams, self).__init__(*args)
        self._setDefault(smoothing=1.0, modelType="multinomial")

    @since("1.5.0")
    def getSmoothing(self):
        """
        Gets the value of smoothing or its default value.
        """
        return self.getOrDefault(self.smoothing)

    @since("1.5.0")
    def getModelType(self):
        """
        Gets the value of modelType or its default value.
        """
        return self.getOrDefault(self.modelType)
@inherit_doc
class NaiveBayes(_JavaProbabilisticClassifier, _NaiveBayesParams, HasThresholds, HasWeightCol,
                 JavaMLWritable, JavaMLReadable):
    """
    Naive Bayes Classifiers.
    It supports both Multinomial and Bernoulli NB. `Multinomial NB \
    <http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html>`_
    can handle finitely supported discrete data. For example, by converting documents into
    TF-IDF vectors, it can be used for document classification. By making every vector a
    binary (0/1) data, it can also be used as `Bernoulli NB \
    <http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html>`_.
    The input feature values for Multinomial NB and Bernoulli NB must be nonnegative.
    Since 3.0.0, it supports Complement NB which is an adaptation of the Multinomial NB.
    Specifically, Complement NB uses statistics from the complement of each class to compute
    the model's coefficients. The inventors of Complement NB show empirically that the parameter
    estimates for CNB are more stable than those for Multinomial NB. Like Multinomial NB, the
    input feature values for Complement NB must be nonnegative.
    Since 3.0.0, it also supports `Gaussian NB \
    <https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Gaussian_naive_Bayes>`_.
    which can handle continuous data.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> from pyspark.sql import Row
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     Row(label=0.0, weight=0.1, features=Vectors.dense([0.0, 0.0])),
    ...     Row(label=0.0, weight=0.5, features=Vectors.dense([0.0, 1.0])),
    ...     Row(label=1.0, weight=1.0, features=Vectors.dense([1.0, 0.0]))])
    >>> nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
    >>> model = nb.fit(df)
    >>> model.setFeaturesCol("features")
    NaiveBayesModel...
    >>> model.getSmoothing()
    1.0
    >>> model.pi
    DenseVector([-0.81..., -0.58...])
    >>> model.theta
    DenseMatrix(2, 2, [-0.91..., -0.51..., -0.40..., -1.09...], 1)
    >>> model.sigma
    DenseMatrix(0, 0, [...], ...)
    >>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF()
    >>> model.predict(test0.head().features)
    1.0
    >>> model.predictRaw(test0.head().features)
    DenseVector([-1.72..., -0.99...])
    >>> model.predictProbability(test0.head().features)
    DenseVector([0.32..., 0.67...])
    >>> result = model.transform(test0).head()
    >>> result.prediction
    1.0
    >>> result.probability
    DenseVector([0.32..., 0.67...])
    >>> result.rawPrediction
    DenseVector([-1.72..., -0.99...])
    >>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
    >>> model.transform(test1).head().prediction
    1.0
    >>> nb_path = temp_path + "/nb"
    >>> nb.save(nb_path)
    >>> nb2 = NaiveBayes.load(nb_path)
    >>> nb2.getSmoothing()
    1.0
    >>> model_path = temp_path + "/nb_model"
    >>> model.save(model_path)
    >>> model2 = NaiveBayesModel.load(model_path)
    >>> model.pi == model2.pi
    True
    >>> model.theta == model2.theta
    True
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    >>> nb = nb.setThresholds([0.01, 10.00])
    >>> model3 = nb.fit(df)
    >>> result = model3.transform(test0).head()
    >>> result.prediction
    0.0
    >>> nb3 = NaiveBayes().setModelType("gaussian")
    >>> model4 = nb3.fit(df)
    >>> model4.getModelType()
    'gaussian'
    >>> model4.sigma
    DenseMatrix(2, 2, [0.0, 0.25, 0.0, 0.0], 1)
    >>> nb5 = NaiveBayes(smoothing=1.0, modelType="complement", weightCol="weight")
    >>> model5 = nb5.fit(df)
    >>> model5.getModelType()
    'complement'
    >>> model5.theta
    DenseMatrix(2, 2, [...], 1)
    >>> model5.sigma
    DenseMatrix(0, 0, [...], ...)
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
                 modelType="multinomial", thresholds=None, weightCol=None):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
                 modelType="multinomial", thresholds=None, weightCol=None)
        """
        super(NaiveBayes, self).__init__()
        # Create the JVM-side estimator peer; parameters are applied via setParams.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.NaiveBayes", self.uid)
        # _input_kwargs is injected by @keyword_only with the caller-supplied kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.5.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
                  modelType="multinomial", thresholds=None, weightCol=None):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
                  modelType="multinomial", thresholds=None, weightCol=None)
        Sets params for Naive Bayes.
        """
        # Only explicitly supplied keyword arguments (captured in _input_kwargs) are set.
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model returned by fit() in its Python counterpart.
        return NaiveBayesModel(java_model)

    @since("1.5.0")
    def setSmoothing(self, value):
        """
        Sets the value of :py:attr:`smoothing`.
        """
        return self._set(smoothing=value)

    @since("1.5.0")
    def setModelType(self, value):
        """
        Sets the value of :py:attr:`modelType`.
        """
        return self._set(modelType=value)

    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
class NaiveBayesModel(_JavaProbabilisticClassificationModel, _NaiveBayesParams, JavaMLWritable,
                      JavaMLReadable):
    """
    Model fitted by NaiveBayes.

    .. versionadded:: 1.5.0
    """

    @property
    @since("2.0.0")
    def pi(self):
        """
        log of class priors.
        """
        return self._call_java("pi")

    @property
    @since("2.0.0")
    def theta(self):
        """
        log of class conditional probabilities.
        """
        return self._call_java("theta")

    @property
    @since("3.0.0")
    def sigma(self):
        """
        variance of each feature.
        """
        # Per the class doctests, this is empty (0 x 0) for non-Gaussian model types.
        return self._call_java("sigma")
class _MultilayerPerceptronParams(_ProbabilisticClassifierParams, HasSeed, HasMaxIter,
                                  HasTol, HasStepSize, HasSolver, HasBlockSize):
    """
    Params for :py:class:`MultilayerPerceptronClassifier`.

    .. versionadded:: 3.0.0
    """

    layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer " +
                   "E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with 100 " +
                   "neurons and output layer of 10 neurons.",
                   typeConverter=TypeConverters.toListInt)
    # Overrides HasSolver's param to document the MLP-specific options.
    solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
                   "options: l-bfgs, gd.", typeConverter=TypeConverters.toString)
    initialWeights = Param(Params._dummy(), "initialWeights", "The initial weights of the model.",
                           typeConverter=TypeConverters.toVector)

    def __init__(self, *args):
        super(_MultilayerPerceptronParams, self).__init__(*args)
        self._setDefault(maxIter=100, tol=1E-6, blockSize=128, stepSize=0.03, solver="l-bfgs")

    @since("1.6.0")
    def getLayers(self):
        """
        Gets the value of layers or its default value.
        """
        return self.getOrDefault(self.layers)

    @since("2.0.0")
    def getInitialWeights(self):
        """
        Gets the value of initialWeights or its default value.
        """
        return self.getOrDefault(self.initialWeights)
@inherit_doc
class MultilayerPerceptronClassifier(_JavaProbabilisticClassifier, _MultilayerPerceptronParams,
                                     JavaMLWritable, JavaMLReadable):
    """
    Classifier trainer based on the Multilayer Perceptron.
    Each layer has sigmoid activation function, output layer has softmax.
    Number of inputs has to be equal to the size of feature vectors.
    Number of outputs has to be equal to the total number of labels.

    .. versionadded:: 1.6.0

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (0.0, Vectors.dense([0.0, 0.0])),
    ...     (1.0, Vectors.dense([0.0, 1.0])),
    ...     (1.0, Vectors.dense([1.0, 0.0])),
    ...     (0.0, Vectors.dense([1.0, 1.0]))], ["label", "features"])
    >>> mlp = MultilayerPerceptronClassifier(layers=[2, 2, 2], seed=123)
    >>> mlp.setMaxIter(100)
    MultilayerPerceptronClassifier...
    >>> mlp.getMaxIter()
    100
    >>> mlp.getBlockSize()
    128
    >>> mlp.setBlockSize(1)
    MultilayerPerceptronClassifier...
    >>> mlp.getBlockSize()
    1
    >>> model = mlp.fit(df)
    >>> model.setFeaturesCol("features")
    MultilayerPerceptronClassificationModel...
    >>> model.getMaxIter()
    100
    >>> model.getLayers()
    [2, 2, 2]
    >>> model.weights.size
    12
    >>> testDF = spark.createDataFrame([
    ...     (Vectors.dense([1.0, 0.0]),),
    ...     (Vectors.dense([0.0, 0.0]),)], ["features"])
    >>> model.predict(testDF.head().features)
    1.0
    >>> model.predictRaw(testDF.head().features)
    DenseVector([-16.208, 16.344])
    >>> model.predictProbability(testDF.head().features)
    DenseVector([0.0, 1.0])
    >>> model.transform(testDF).select("features", "prediction").show()
    +---------+----------+
    | features|prediction|
    +---------+----------+
    |[1.0,0.0]|       1.0|
    |[0.0,0.0]|       0.0|
    +---------+----------+
    ...
    >>> mlp_path = temp_path + "/mlp"
    >>> mlp.save(mlp_path)
    >>> mlp2 = MultilayerPerceptronClassifier.load(mlp_path)
    >>> mlp2.getBlockSize()
    1
    >>> model_path = temp_path + "/mlp_model"
    >>> model.save(model_path)
    >>> model2 = MultilayerPerceptronClassificationModel.load(model_path)
    >>> model.getLayers() == model2.getLayers()
    True
    >>> model.weights == model2.weights
    True
    >>> model.transform(testDF).take(1) == model2.transform(testDF).take(1)
    True
    >>> mlp2 = mlp2.setInitialWeights(list(range(0, 12)))
    >>> model3 = mlp2.fit(df)
    >>> model3.weights != model2.weights
    True
    >>> model3.getLayers() == model.getLayers()
    True
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
                 solver="l-bfgs", initialWeights=None, probabilityCol="probability",
                 rawPredictionCol="rawPrediction"):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
                 solver="l-bfgs", initialWeights=None, probabilityCol="probability", \
                 rawPredictionCol="rawPrediction")
        """
        super(MultilayerPerceptronClassifier, self).__init__()
        # Create the JVM-side estimator first, then apply all keyword params to it.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.6.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
                  solver="l-bfgs", initialWeights=None, probabilityCol="probability",
                  rawPredictionCol="rawPrediction"):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
                  solver="l-bfgs", initialWeights=None, probabilityCol="probability", \
                  rawPredictionCol="rawPrediction"):
        Sets params for MultilayerPerceptronClassifier.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return MultilayerPerceptronClassificationModel(java_model)

    @since("1.6.0")
    def setLayers(self, value):
        """
        Sets the value of :py:attr:`layers`.
        """
        return self._set(layers=value)

    @since("1.6.0")
    def setBlockSize(self, value):
        """
        Sets the value of :py:attr:`blockSize`.
        """
        return self._set(blockSize=value)

    @since("2.0.0")
    def setInitialWeights(self, value):
        """
        Sets the value of :py:attr:`initialWeights`.
        """
        return self._set(initialWeights=value)

    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("2.0.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)

    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)
class MultilayerPerceptronClassificationModel(_JavaProbabilisticClassificationModel,
                                              _MultilayerPerceptronParams, JavaMLWritable,
                                              JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by MultilayerPerceptronClassifier.

    .. versionadded:: 1.6.0
    """

    @property
    @since("2.0.0")
    def weights(self):
        """
        the weights of layers.
        """
        return self._call_java("weights")

    # NOTE(review): unlike `weights`, `summary` is NOT a @property here, so
    # callers must invoke it as `model.summary()` — confirm this asymmetry is
    # intentional before changing it (adding @property would break callers).
    @since("3.1.0")
    def summary(self):
        """
        Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
        trained on the training set. An exception is thrown if `trainingSummary is None`.
        """
        if self.hasSummary:
            return MultilayerPerceptronClassificationTrainingSummary(
                super(MultilayerPerceptronClassificationModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        .. versionadded:: 3.1.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on.
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        # Evaluation is delegated to the JVM; wrap the returned Java summary.
        java_mlp_summary = self._call_java("evaluate", dataset)
        return MultilayerPerceptronClassificationSummary(java_mlp_summary)
class MultilayerPerceptronClassificationSummary(_ClassificationSummary):
    """
    Abstraction for MultilayerPerceptronClassifier Results for a given model.

    .. versionadded:: 3.1.0
    """
    # Marker subclass: all behavior is inherited from _ClassificationSummary.
    pass
@inherit_doc
class MultilayerPerceptronClassificationTrainingSummary(MultilayerPerceptronClassificationSummary,
                                                        _TrainingSummary):
    """
    Abstraction for MultilayerPerceptronClassifier Training results.

    .. versionadded:: 3.1.0
    """
    # Marker subclass combining the evaluation summary with training-only fields.
    pass
class _OneVsRestParams(_ClassifierParams, HasWeightCol):
    """
    Params for :py:class:`OneVsRest` and :py:class:`OneVsRestModel`.
    """

    classifier = Param(Params._dummy(), "classifier", "base binary classifier")

    @since("2.0.0")
    def getClassifier(self):
        """
        Gets the value of classifier or its default value.
        """
        return self.getOrDefault(self.classifier)
@inherit_doc
class OneVsRest(Estimator, _OneVsRestParams, HasParallelism, MLReadable, MLWritable):
    """
    Reduction of Multiclass Classification to Binary Classification.
    Performs reduction using one against all strategy.
    For a multiclass classification with k classes, train k models (one per class).
    Each example is scored against all k models and the model with highest score
    is picked to label the example.

    .. versionadded:: 2.0.0

    Examples
    --------
    >>> from pyspark.sql import Row
    >>> from pyspark.ml.linalg import Vectors
    >>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
    >>> df = spark.read.format("libsvm").load(data_path)
    >>> lr = LogisticRegression(regParam=0.01)
    >>> ovr = OneVsRest(classifier=lr)
    >>> ovr.getRawPredictionCol()
    'rawPrediction'
    >>> ovr.setPredictionCol("newPrediction")
    OneVsRest...
    >>> model = ovr.fit(df)
    >>> model.models[0].coefficients
    DenseVector([0.5..., -1.0..., 3.4..., 4.2...])
    >>> model.models[1].coefficients
    DenseVector([-2.1..., 3.1..., -2.6..., -2.3...])
    >>> model.models[2].coefficients
    DenseVector([0.3..., -3.4..., 1.0..., -1.1...])
    >>> [x.intercept for x in model.models]
    [-2.7..., -2.5..., -1.3...]
    >>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 0.0, 1.0, 1.0))]).toDF()
    >>> model.transform(test0).head().newPrediction
    0.0
    >>> test1 = sc.parallelize([Row(features=Vectors.sparse(4, [0], [1.0]))]).toDF()
    >>> model.transform(test1).head().newPrediction
    2.0
    >>> test2 = sc.parallelize([Row(features=Vectors.dense(0.5, 0.4, 0.3, 0.2))]).toDF()
    >>> model.transform(test2).head().newPrediction
    0.0
    >>> model_path = temp_path + "/ovr_model"
    >>> model.save(model_path)
    >>> model2 = OneVsRestModel.load(model_path)
    >>> model2.transform(test0).head().newPrediction
    0.0
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    >>> model.transform(test2).columns
    ['features', 'rawPrediction', 'newPrediction']
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
        """
        super(OneVsRest, self).__init__()
        self._setDefault(parallelism=1)
        kwargs = self._input_kwargs
        self._set(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  rawPredictionCol="rawPrediction", classifier=None, weightCol=None, parallelism=1):
        Sets params for OneVsRest.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.0.0")
    def setClassifier(self, value):
        """
        Sets the value of :py:attr:`classifier`.
        """
        return self._set(classifier=value)

    def setLabelCol(self, value):
        """
        Sets the value of :py:attr:`labelCol`.
        """
        return self._set(labelCol=value)

    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    def setRawPredictionCol(self, value):
        """
        Sets the value of :py:attr:`rawPredictionCol`.
        """
        return self._set(rawPredictionCol=value)

    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)

    def setParallelism(self, value):
        """
        Sets the value of :py:attr:`parallelism`.
        """
        return self._set(parallelism=value)

    def _fit(self, dataset):
        labelCol = self.getLabelCol()
        featuresCol = self.getFeaturesCol()
        predictionCol = self.getPredictionCol()
        classifier = self.getClassifier()
        # Infer number of classes from the maximum label value; assumes labels
        # are consecutive doubles starting at 0.0 -- TODO confirm upstream contract.
        numClasses = int(dataset.agg({labelCol: "max"}).head()["max("+labelCol+")"]) + 1
        weightCol = None
        if (self.isDefined(self.weightCol) and self.getWeightCol()):
            if isinstance(classifier, HasWeightCol):
                weightCol = self.getWeightCol()
            else:
                # Best-effort: drop the weight column rather than failing.
                warnings.warn("weightCol is ignored, "
                              "as it is not supported by {} now.".format(classifier))
        if weightCol:
            multiclassLabeled = dataset.select(labelCol, featuresCol, weightCol)
        else:
            multiclassLabeled = dataset.select(labelCol, featuresCol)
        # persist if underlying dataset is not persistent.
        handlePersistence = dataset.storageLevel == StorageLevel(False, False, False, False)
        if handlePersistence:
            multiclassLabeled.persist(StorageLevel.MEMORY_AND_DISK)

        def trainSingleClass(index):
            # One-vs-rest relabeling: class `index` becomes 1.0, all others 0.0.
            binaryLabelCol = "mc2b$" + str(index)
            trainingDataset = multiclassLabeled.withColumn(
                binaryLabelCol,
                when(multiclassLabeled[labelCol] == float(index), 1.0).otherwise(0.0))
            paramMap = dict([(classifier.labelCol, binaryLabelCol),
                             (classifier.featuresCol, featuresCol),
                             (classifier.predictionCol, predictionCol)])
            if weightCol:
                paramMap[classifier.weightCol] = weightCol
            return classifier.fit(trainingDataset, paramMap)

        # Train up to `parallelism` binary models concurrently.
        pool = ThreadPool(processes=min(self.getParallelism(), numClasses))
        models = pool.map(trainSingleClass, range(numClasses))
        if handlePersistence:
            multiclassLabeled.unpersist()
        return self._copyValues(OneVsRestModel(models=models))

    def copy(self, extra=None):
        """
        Creates a copy of this instance with a randomly generated uid
        and some extra params. This creates a deep copy of the embedded paramMap,
        and copies the embedded and extra parameters over.

        .. versionadded:: 2.0.0

        Parameters
        ----------
        extra : dict, optional
            Extra parameters to copy to the new instance

        Returns
        -------
        :py:class:`OneVsRest`
            Copy of this instance
        """
        if extra is None:
            extra = dict()
        newOvr = Params.copy(self, extra)
        if self.isSet(self.classifier):
            newOvr.setClassifier(self.getClassifier().copy(extra))
        return newOvr

    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java OneVsRest, create and return a Python wrapper of it.
        Used for ML persistence.
        """
        featuresCol = java_stage.getFeaturesCol()
        labelCol = java_stage.getLabelCol()
        predictionCol = java_stage.getPredictionCol()
        rawPredictionCol = java_stage.getRawPredictionCol()
        classifier = JavaParams._from_java(java_stage.getClassifier())
        parallelism = java_stage.getParallelism()
        py_stage = cls(featuresCol=featuresCol, labelCol=labelCol, predictionCol=predictionCol,
                       rawPredictionCol=rawPredictionCol, classifier=classifier,
                       parallelism=parallelism)
        if java_stage.isDefined(java_stage.getParam("weightCol")):
            py_stage.setWeightCol(java_stage.getWeightCol())
        py_stage._resetUid(java_stage.uid())
        return py_stage

    def _to_java(self):
        """
        Transfer this instance to a Java OneVsRest. Used for ML persistence.

        Returns
        -------
        py4j.java_gateway.JavaObject
            Java object equivalent to this instance.
        """
        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest",
                                             self.uid)
        _java_obj.setClassifier(self.getClassifier()._to_java())
        _java_obj.setParallelism(self.getParallelism())
        _java_obj.setFeaturesCol(self.getFeaturesCol())
        _java_obj.setLabelCol(self.getLabelCol())
        _java_obj.setPredictionCol(self.getPredictionCol())
        if (self.isDefined(self.weightCol) and self.getWeightCol()):
            _java_obj.setWeightCol(self.getWeightCol())
        _java_obj.setRawPredictionCol(self.getRawPredictionCol())
        return _java_obj

    @classmethod
    def read(cls):
        return OneVsRestReader(cls)

    def write(self):
        # A pure-Java classifier can use the JVM writer; otherwise fall back
        # to the Python-side OneVsRestWriter.
        if isinstance(self.getClassifier(), JavaMLWritable):
            return JavaMLWriter(self)
        else:
            return OneVsRestWriter(self)
class _OneVsRestSharedReadWrite:
    """Persistence helpers shared by OneVsRest and OneVsRestModel readers/writers."""

    @staticmethod
    def saveImpl(instance, sc, path, extraMetadata=None):
        # The classifier param cannot be JSON-serialized; save it separately.
        skipParams = ['classifier']
        jsonParams = DefaultParamsWriter.extractJsonParams(instance, skipParams)
        DefaultParamsWriter.saveMetadata(instance, path, sc, paramMap=jsonParams,
                                         extraMetadata=extraMetadata)
        classifierPath = os.path.join(path, 'classifier')
        instance.getClassifier().save(classifierPath)

    @staticmethod
    def loadClassifier(path, sc):
        classifierPath = os.path.join(path, 'classifier')
        return DefaultParamsReader.loadParamsInstance(classifierPath, sc)

    @staticmethod
    def validateParams(instance):
        # Fail fast with a clear message if any nested stage is not writable.
        elems_to_check = [instance.getClassifier()]
        if isinstance(instance, OneVsRestModel):
            elems_to_check.extend(instance.models)
        for elem in elems_to_check:
            if not isinstance(elem, MLWritable):
                raise ValueError(f'OneVsRest write will fail because it contains {elem.uid} '
                                 f'which is not writable.')
@inherit_doc
class OneVsRestReader(MLReader):
    """Reader for OneVsRest; dispatches to the Java reader for JVM-written stages."""

    def __init__(self, cls):
        super(OneVsRestReader, self).__init__()
        self.cls = cls

    def load(self, path):
        metadata = DefaultParamsReader.loadMetadata(path, self.sc)
        if not DefaultParamsReader.isPythonParamsInstance(metadata):
            # Stage was written from the JVM side; delegate to the Java reader.
            return JavaMLReader(self.cls).load(path)
        else:
            classifier = _OneVsRestSharedReadWrite.loadClassifier(path, self.sc)
            ova = OneVsRest(classifier=classifier)._resetUid(metadata['uid'])
            DefaultParamsReader.getAndSetParams(ova, metadata, skipParams=['classifier'])
            return ova
@inherit_doc
class OneVsRestWriter(MLWriter):
    """Python-side writer for OneVsRest (used when the classifier is not Java-writable)."""

    def __init__(self, instance):
        super(OneVsRestWriter, self).__init__()
        self.instance = instance

    def saveImpl(self, path):
        _OneVsRestSharedReadWrite.validateParams(self.instance)
        _OneVsRestSharedReadWrite.saveImpl(self.instance, self.sc, path)
class OneVsRestModel(Model, _OneVsRestParams, MLReadable, MLWritable):
    """
    Model fitted by OneVsRest.
    This stores the models resulting from training k binary classifiers: one for each class.
    Each example is scored against all k models, and the model with the highest score
    is picked to label the example.

    .. versionadded:: 2.0.0
    """

    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)

    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)

    def setRawPredictionCol(self, value):
        """
        Sets the value of :py:attr:`rawPredictionCol`.
        """
        return self._set(rawPredictionCol=value)

    def __init__(self, models):
        super(OneVsRestModel, self).__init__()
        self.models = models
        # Pure-Python sub-models cannot be mirrored on the JVM; stop here.
        if not isinstance(models[0], JavaMLWritable):
            return
        # set java instance
        java_models = [model._to_java() for model in self.models]
        sc = SparkContext._active_spark_context
        java_models_array = JavaWrapper._new_java_array(java_models,
                                                        sc._gateway.jvm.org.apache.spark.ml
                                                        .classification.ClassificationModel)
        # TODO: need to set metadata
        metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
        self._java_obj = \
            JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
                                     self.uid, metadata.empty(), java_models_array)

    def _transform(self, dataset):
        # determine the input columns: these need to be passed through
        origCols = dataset.columns
        # add an accumulator column to store predictions of all the models
        accColName = "mbc$acc" + str(uuid.uuid4())
        initUDF = udf(lambda _: [], ArrayType(DoubleType()))
        newDataset = dataset.withColumn(accColName, initUDF(dataset[origCols[0]]))
        # persist if underlying dataset is not persistent.
        handlePersistence = dataset.storageLevel == StorageLevel(False, False, False, False)
        if handlePersistence:
            newDataset.persist(StorageLevel.MEMORY_AND_DISK)
        # update the accumulator column with the result of prediction of models
        aggregatedDataset = newDataset
        for index, model in enumerate(self.models):
            rawPredictionCol = self.getRawPredictionCol()
            columns = origCols + [rawPredictionCol, accColName]
            # add temporary column to store intermediate scores and update
            tmpColName = "mbc$tmp" + str(uuid.uuid4())
            # Appends the positive-class raw score (index 1) of this binary model.
            updateUDF = udf(
                lambda predictions, prediction: predictions + [prediction.tolist()[1]],
                ArrayType(DoubleType()))
            transformedDataset = model.transform(aggregatedDataset).select(*columns)
            updatedDataset = transformedDataset.withColumn(
                tmpColName,
                updateUDF(transformedDataset[accColName], transformedDataset[rawPredictionCol]))
            newColumns = origCols + [tmpColName]
            # switch out the intermediate column with the accumulator column
            aggregatedDataset = updatedDataset\
                .select(*newColumns).withColumnRenamed(tmpColName, accColName)
        if handlePersistence:
            newDataset.unpersist()
        if self.getRawPredictionCol():
            def func(predictions):
                predArray = []
                for x in predictions:
                    predArray.append(x)
                return Vectors.dense(predArray)
            rawPredictionUDF = udf(func, VectorUDT())
            aggregatedDataset = aggregatedDataset.withColumn(
                self.getRawPredictionCol(), rawPredictionUDF(aggregatedDataset[accColName]))
        if self.getPredictionCol():
            # output the index of the classifier with highest confidence as prediction
            labelUDF = udf(lambda predictions: float(max(enumerate(predictions),
                                                         key=operator.itemgetter(1))[0]), DoubleType())
            aggregatedDataset = aggregatedDataset.withColumn(
                self.getPredictionCol(), labelUDF(aggregatedDataset[accColName]))
        return aggregatedDataset.drop(accColName)

    def copy(self, extra=None):
        """
        Creates a copy of this instance with a randomly generated uid
        and some extra params. This creates a deep copy of the embedded paramMap,
        and copies the embedded and extra parameters over.

        .. versionadded:: 2.0.0

        Parameters
        ----------
        extra : dict, optional
            Extra parameters to copy to the new instance

        Returns
        -------
        :py:class:`OneVsRestModel`
            Copy of this instance
        """
        if extra is None:
            extra = dict()
        newModel = Params.copy(self, extra)
        newModel.models = [model.copy(extra) for model in self.models]
        return newModel

    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java OneVsRestModel, create and return a Python wrapper of it.
        Used for ML persistence.
        """
        featuresCol = java_stage.getFeaturesCol()
        labelCol = java_stage.getLabelCol()
        predictionCol = java_stage.getPredictionCol()
        classifier = JavaParams._from_java(java_stage.getClassifier())
        models = [JavaParams._from_java(model) for model in java_stage.models()]
        py_stage = cls(models=models).setPredictionCol(predictionCol)\
            .setFeaturesCol(featuresCol)
        py_stage._set(labelCol=labelCol)
        if java_stage.isDefined(java_stage.getParam("weightCol")):
            py_stage._set(weightCol=java_stage.getWeightCol())
        py_stage._set(classifier=classifier)
        py_stage._resetUid(java_stage.uid())
        return py_stage

    def _to_java(self):
        """
        Transfer this instance to a Java OneVsRestModel. Used for ML persistence.

        Returns
        -------
        py4j.java_gateway.JavaObject
            Java object equivalent to this instance.
        """
        sc = SparkContext._active_spark_context
        java_models = [model._to_java() for model in self.models]
        java_models_array = JavaWrapper._new_java_array(
            java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel)
        metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
                                             self.uid, metadata.empty(), java_models_array)
        _java_obj.set("classifier", self.getClassifier()._to_java())
        _java_obj.set("featuresCol", self.getFeaturesCol())
        _java_obj.set("labelCol", self.getLabelCol())
        _java_obj.set("predictionCol", self.getPredictionCol())
        if (self.isDefined(self.weightCol) and self.getWeightCol()):
            _java_obj.set("weightCol", self.getWeightCol())
        return _java_obj

    @classmethod
    def read(cls):
        return OneVsRestModelReader(cls)

    def write(self):
        # Only use the JVM writer when classifier and every sub-model are Java-writable.
        if all(map(lambda elem: isinstance(elem, JavaMLWritable),
                   [self.getClassifier()] + self.models)):
            return JavaMLWriter(self)
        else:
            return OneVsRestModelWriter(self)
@inherit_doc
class OneVsRestModelReader(MLReader):
    """Reader for OneVsRestModel; dispatches to the Java reader for JVM-written models."""

    def __init__(self, cls):
        super(OneVsRestModelReader, self).__init__()
        self.cls = cls

    def load(self, path):
        metadata = DefaultParamsReader.loadMetadata(path, self.sc)
        if not DefaultParamsReader.isPythonParamsInstance(metadata):
            return JavaMLReader(self.cls).load(path)
        else:
            classifier = _OneVsRestSharedReadWrite.loadClassifier(path, self.sc)
            numClasses = metadata['numClasses']
            # Sub-models were written one directory per class: model_0 .. model_{k-1}.
            subModels = [None] * numClasses
            for idx in range(numClasses):
                subModelPath = os.path.join(path, f'model_{idx}')
                subModels[idx] = DefaultParamsReader.loadParamsInstance(subModelPath, self.sc)
            ovaModel = OneVsRestModel(subModels)._resetUid(metadata['uid'])
            ovaModel.set(ovaModel.classifier, classifier)
            DefaultParamsReader.getAndSetParams(ovaModel, metadata, skipParams=['classifier'])
            return ovaModel
@inherit_doc
class OneVsRestModelWriter(MLWriter):
    """Python-side writer for OneVsRestModel (used when any nested stage is not Java-writable)."""

    def __init__(self, instance):
        super(OneVsRestModelWriter, self).__init__()
        self.instance = instance

    def saveImpl(self, path):
        _OneVsRestSharedReadWrite.validateParams(self.instance)
        instance = self.instance
        numClasses = len(instance.models)
        # Record the class count so the reader knows how many sub-models to load.
        extraMetadata = {'numClasses': numClasses}
        _OneVsRestSharedReadWrite.saveImpl(instance, self.sc, path, extraMetadata=extraMetadata)
        for idx in range(numClasses):
            subModelPath = os.path.join(path, f'model_{idx}')
            instance.models[idx].save(subModelPath)
@inherit_doc
class FMClassifier(_JavaProbabilisticClassifier, _FactorizationMachinesParams, JavaMLWritable,
                   JavaMLReadable):
    """
    Factorization Machines learning algorithm for classification.

    Solver supports:

    * gd (normal mini-batch gradient descent)
    * adamW (default)

    .. versionadded:: 3.0.0

    Examples
    --------
    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.classification import FMClassifier
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> fm = FMClassifier(factorSize=2)
    >>> fm.setSeed(11)
    FMClassifier...
    >>> model = fm.fit(df)
    >>> model.getMaxIter()
    100
    >>> test0 = spark.createDataFrame([
    ...     (Vectors.dense(-1.0),),
    ...     (Vectors.dense(0.5),),
    ...     (Vectors.dense(1.0),),
    ...     (Vectors.dense(2.0),)], ["features"])
    >>> model.predictRaw(test0.head().features)
    DenseVector([22.13..., -22.13...])
    >>> model.predictProbability(test0.head().features)
    DenseVector([1.0, 0.0])
    >>> model.transform(test0).select("features", "probability").show(10, False)
    +--------+------------------------------------------+
    |features|probability                               |
    +--------+------------------------------------------+
    |[-1.0]  |[0.9999999997574736,2.425264676902229E-10]|
    |[0.5]   |[0.47627851732981163,0.5237214826701884]  |
    |[1.0]   |[5.491554426243495E-4,0.9994508445573757] |
    |[2.0]   |[2.005766663870645E-10,0.9999999997994233]|
    +--------+------------------------------------------+
    ...
    >>> model.intercept
    -7.316665276826291
    >>> model.linear
    DenseVector([14.8232])
    >>> model.factors
    DenseMatrix(1, 2, [0.0163, -0.0051], 1)
    >>> model_path = temp_path + "/fm_model"
    >>> model.save(model_path)
    >>> model2 = FMClassificationModel.load(model_path)
    >>> model2.intercept
    -7.316665276826291
    >>> model2.linear
    DenseVector([14.8232])
    >>> model2.factors
    DenseMatrix(1, 2, [0.0163, -0.0051], 1)
    >>> model.transform(test0).take(1) == model2.transform(test0).take(1)
    True
    """

    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 probabilityCol="probability", rawPredictionCol="rawPrediction",
                 factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                 miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                 tol=1e-6, solver="adamW", thresholds=None, seed=None):
        """
        __init__(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 probabilityCol="probability", rawPredictionCol="rawPrediction", \
                 factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
                 miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
                 tol=1e-6, solver="adamW", thresholds=None, seed=None)
        """
        super(FMClassifier, self).__init__()
        # Create the JVM-side estimator first, then apply all keyword params to it.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.classification.FMClassifier", self.uid)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("3.0.0")
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  probabilityCol="probability", rawPredictionCol="rawPrediction",
                  factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0,
                  miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0,
                  tol=1e-6, solver="adamW", thresholds=None, seed=None):
        """
        setParams(self, \\*, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  probabilityCol="probability", rawPredictionCol="rawPrediction", \
                  factorSize=8, fitIntercept=True, fitLinear=True, regParam=0.0, \
                  miniBatchFraction=1.0, initStd=0.01, maxIter=100, stepSize=1.0, \
                  tol=1e-6, solver="adamW", thresholds=None, seed=None)
        Sets Params for FMClassifier.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return FMClassificationModel(java_model)

    @since("3.0.0")
    def setFactorSize(self, value):
        """
        Sets the value of :py:attr:`factorSize`.
        """
        return self._set(factorSize=value)

    @since("3.0.0")
    def setFitLinear(self, value):
        """
        Sets the value of :py:attr:`fitLinear`.
        """
        return self._set(fitLinear=value)

    @since("3.0.0")
    def setMiniBatchFraction(self, value):
        """
        Sets the value of :py:attr:`miniBatchFraction`.
        """
        return self._set(miniBatchFraction=value)

    @since("3.0.0")
    def setInitStd(self, value):
        """
        Sets the value of :py:attr:`initStd`.
        """
        return self._set(initStd=value)

    @since("3.0.0")
    def setMaxIter(self, value):
        """
        Sets the value of :py:attr:`maxIter`.
        """
        return self._set(maxIter=value)

    @since("3.0.0")
    def setStepSize(self, value):
        """
        Sets the value of :py:attr:`stepSize`.
        """
        return self._set(stepSize=value)

    @since("3.0.0")
    def setTol(self, value):
        """
        Sets the value of :py:attr:`tol`.
        """
        return self._set(tol=value)

    @since("3.0.0")
    def setSolver(self, value):
        """
        Sets the value of :py:attr:`solver`.
        """
        return self._set(solver=value)

    @since("3.0.0")
    def setSeed(self, value):
        """
        Sets the value of :py:attr:`seed`.
        """
        return self._set(seed=value)

    @since("3.0.0")
    def setFitIntercept(self, value):
        """
        Sets the value of :py:attr:`fitIntercept`.
        """
        return self._set(fitIntercept=value)

    @since("3.0.0")
    def setRegParam(self, value):
        """
        Sets the value of :py:attr:`regParam`.
        """
        return self._set(regParam=value)
class FMClassificationModel(_JavaProbabilisticClassificationModel, _FactorizationMachinesParams,
                            JavaMLWritable, JavaMLReadable, HasTrainingSummary):
    """
    Model fitted by :class:`FMClassifier`.

    .. versionadded:: 3.0.0
    """

    @property
    @since("3.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("3.0.0")
    def linear(self):
        """
        Model linear term.
        """
        return self._call_java("linear")

    @property
    @since("3.0.0")
    def factors(self):
        """
        Model factor term.
        """
        return self._call_java("factors")

    # NOTE(review): unlike the accessors above, `summary` is NOT a @property,
    # so callers must invoke it as `model.summary()` — confirm this is intended
    # before changing it (adding @property would break method-call users).
    @since("3.1.0")
    def summary(self):
        """
        Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
        trained on the training set. An exception is thrown if `trainingSummary is None`.
        """
        if self.hasSummary:
            return FMClassificationTrainingSummary(super(FMClassificationModel, self).summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        .. versionadded:: 3.1.0

        Parameters
        ----------
        dataset : :py:class:`pyspark.sql.DataFrame`
            Test dataset to evaluate model on.
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        # Evaluation is delegated to the JVM; wrap the returned Java summary.
        java_fm_summary = self._call_java("evaluate", dataset)
        return FMClassificationSummary(java_fm_summary)
class FMClassificationSummary(_BinaryClassificationSummary):
    """
    Abstraction for FMClassifier Results for a given model.

    .. versionadded:: 3.1.0
    """
    # Marker subclass: all behavior is inherited from _BinaryClassificationSummary.
    pass
@inherit_doc
class FMClassificationTrainingSummary(FMClassificationSummary, _TrainingSummary):
    """
    Abstraction for FMClassifier Training results.

    .. versionadded:: 3.1.0
    """
    # Marker subclass combining the evaluation summary with training-only fields.
    pass
if __name__ == "__main__":
import doctest
import pyspark.ml.classification
from pyspark.sql import SparkSession
globs = pyspark.ml.classification.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.classification tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| BryanCutler/spark | python/pyspark/ml/classification.py | Python | apache-2.0 | 126,641 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import binascii
import collections
import re
from contextlib import contextmanager
from pyasn1.codec.der import encoder
from pyasn1.type import namedtype, univ
import pytest
import six
from cryptography.exceptions import UnsupportedAlgorithm
import cryptography_vectors
# A single unkeyed hash test vector: digest is the expected hash of message.
HashVector = collections.namedtuple("HashVector", ["message", "digest"])
# A keyed (e.g. HMAC/CMAC) test vector: digest is the expected MAC of message under key.
KeyedHashVector = collections.namedtuple(
    "KeyedHashVector", ["message", "digest", "key"]
)
def select_backends(names, backend_list):
    """Filter *backend_list* down to the backends named in *names*.

    *names* is a comma-separated string of backend names, or ``None`` to
    keep every backend.  Raises ``ValueError`` when nothing matches.
    """
    if names is None:
        return backend_list
    wanted = [part.strip() for part in names.split(',')]
    chosen = [backend for backend in backend_list if backend.name in wanted]
    if not chosen:
        raise ValueError(
            "No backend selected. Tried to select: {0}".format(wanted)
        )
    return chosen
def skip_if_empty(backend_list, required_interfaces):
    """Skip the current pytest test when no backend supplies the interfaces."""
    if not backend_list:
        pytest.skip(
            "No backends provided supply the interface: {0}".format(
                ", ".join(iface.__name__ for iface in required_interfaces)
            )
        )
def check_backend_support(item):
    """Honour the ``supported`` marker on a collected pytest test *item*.

    Skips the test when the backend fixture fails the marker's ``only_if``
    predicate; using the marker on a test without a ``backend`` fixture is
    an error.
    """
    supported = item.keywords.get("supported")
    if supported and "backend" in item.funcargs:
        if not supported.kwargs["only_if"](item.funcargs["backend"]):
            pytest.skip("{0} ({1})".format(
                supported.kwargs["skip_message"], item.funcargs["backend"]
            ))
    elif supported:
        raise ValueError("This mark is only available on methods that take a "
                         "backend")
@contextmanager
def raises_unsupported_algorithm(reason):
    """Context manager asserting the body raises UnsupportedAlgorithm.

    Also checks the exception carries exactly the given internal *reason*
    (identity comparison).
    """
    with pytest.raises(UnsupportedAlgorithm) as exc_info:
        yield exc_info
    assert exc_info.value._reason is reason
class _DSSSigValue(univ.Sequence):
    # ASN.1 SEQUENCE { r INTEGER, s INTEGER } — the DER layout used for
    # DSA/ECDSA signature values.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('r', univ.Integer()),
        namedtype.NamedType('s', univ.Integer())
    )
def der_encode_dsa_signature(r, s):
    """DER-encode the DSA/ECDSA signature integer pair ``(r, s)``."""
    sig = _DSSSigValue()
    sig.setComponentByName('r', r)
    sig.setComponentByName('s', s)
    return encoder.encode(sig)
def load_vectors_from_file(filename, loader):
    """Open *filename* from the cryptography_vectors package and run *loader* on it."""
    with cryptography_vectors.open_vector_file(filename) as vector_file:
        return loader(vector_file)
def load_nist_vectors(vector_data):
    """Parse a NIST ``Key = Value`` response file into a list of dicts.

    Each ``COUNT`` token opens a new test-case dict; a bare ``FAIL`` line
    marks the current case as an expected failure.  All other keys are
    stored lowercased with ASCII-encoded values (``\\0`` escapes become
    NUL bytes).
    """
    cases = []
    current = None
    for raw in vector_data:
        stripped = raw.strip()
        # Blank lines, comments, and [section] headers carry no data.
        if (not stripped or stripped.startswith("#") or
                (stripped.startswith("[") and stripped.endswith("]"))):
            continue
        # A bare FAIL token flags the current case as a negative test.
        if stripped == "FAIL":
            current["fail"] = True
            continue
        name, value = [part.strip() for part in stripped.split("=")]
        # Some tests (PBKDF2) spell a literal NUL character as "\0".
        value = value.replace("\\0", "\0")
        if name.upper() == "COUNT":
            # COUNT starts a fresh test-case block.
            current = {}
            cases.append(current)
        else:
            current[name.lower()] = value.encode("ascii")
    return cases
def load_cryptrec_vectors(vector_data):
    """Parse CRYPTREC ``K/P/C`` vector files.

    Returns a list of ``{"key", "plaintext", "ciphertext"}`` dicts.  A single
    key line is typically followed by many plaintext/ciphertext pairs, so a
    ``C`` line completes one tuple using the most recent ``K`` and ``P``.
    """
    results = []
    for raw in vector_data:
        entry = raw.strip()
        if not entry or entry.startswith("#"):
            continue
        if entry[0] not in "KPC":
            raise ValueError("Invalid line in file '{}'".format(entry))
        payload = entry.split(" : ")[1].replace(" ", "").encode("ascii")
        if entry[0] == "K":
            key = payload
        elif entry[0] == "P":
            pt = payload
        else:
            # A C line completes the current K+P+C tuple.
            results.append({
                "key": key,
                "plaintext": pt,
                "ciphertext": payload
            })
    return results
def load_hash_vectors(vector_data):
    """Parse NIST hash/HMAC vector files into (Keyed)HashVector tuples.

    A ``Key`` line switches the current record to an HMAC vector; an ``MD``
    line terminates the record.  ``Len = 0`` means the message is genuinely
    empty even though the file spells it as hex ``00``.
    """
    vectors = []
    key = msg = md = None
    for raw in vector_data:
        entry = raw.strip()
        if not entry or entry[0] in "#[":
            continue
        if entry.startswith("Len"):
            length = int(entry.split(" = ")[1])
        elif entry.startswith("Key"):
            # Only HMAC vectors carry a key attribute; hash vectors do not.
            key = entry.split(" = ")[1].encode("ascii")
        elif entry.startswith("Msg"):
            # NIST writes an empty message as hex "00"; honour Len instead.
            raw_msg = entry.split(" = ")[1].encode("ascii")
            msg = raw_msg if length > 0 else b""
        elif entry.startswith("MD"):
            md = entry.split(" = ")[1]
            # MD closes the record: emit it and reset the accumulators.
            if key is None:
                vectors.append(HashVector(msg, md))
            else:
                vectors.append(KeyedHashVector(msg, md, key))
            key = msg = md = None
        else:
            raise ValueError("Unknown line in hash vector")
    return vectors
def load_pkcs1_vectors(vector_data):
    """
    Loads data out of RSA PKCS #1 vector files.

    Returns a list of ``(private_key_vector, public_key_vector)`` pairs.
    The parser is a line-driven state machine: ``attr`` names the field the
    following hex lines belong to, ``key`` points at whichever key dict is
    being filled, and ``example_vector`` accumulates per-example data
    (message/salt/seed/signature/encryption) attached to the private key.
    """
    private_key_vector = None
    public_key_vector = None
    attr = None
    key = None
    example_vector = None
    examples = []
    vectors = []
    for line in vector_data:
        # A new example header flushes the previous example's hex blobs.
        if (
            line.startswith("# PSS Example") or
            line.startswith("# OAEP Example") or
            line.startswith("# PKCS#1 v1.5")
        ):
            if example_vector:
                for key, value in six.iteritems(example_vector):
                    hex_str = "".join(value).replace(" ", "").encode("ascii")
                    example_vector[key] = hex_str
                examples.append(example_vector)
            attr = None
            example_vector = collections.defaultdict(list)
        if line.startswith("# Message"):
            attr = "message"
            continue
        elif line.startswith("# Salt"):
            attr = "salt"
            continue
        elif line.startswith("# Seed"):
            attr = "seed"
            continue
        elif line.startswith("# Signature"):
            attr = "signature"
            continue
        elif line.startswith("# Encryption"):
            attr = "encryption"
            continue
        elif (
            example_vector and
            line.startswith("# =============================================")
        ):
            # Section divider closes the in-flight example.
            for key, value in six.iteritems(example_vector):
                hex_str = "".join(value).replace(" ", "").encode("ascii")
                example_vector[key] = hex_str
            examples.append(example_vector)
            example_vector = None
            attr = None
        elif example_vector and line.startswith("#"):
            continue
        else:
            # Non-comment data line: hex payload for the current attribute.
            if attr is not None and example_vector is not None:
                example_vector[attr].append(line.strip())
            continue
        if (
            line.startswith("# Example") or
            line.startswith("# =============================================")
        ):
            # A new key block begins: finalize the previous key pair.
            if key:
                assert private_key_vector
                assert public_key_vector
                for key, value in six.iteritems(public_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    public_key_vector[key] = int(hex_str, 16)
                for key, value in six.iteritems(private_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    private_key_vector[key] = int(hex_str, 16)
                private_key_vector["examples"] = examples
                examples = []
                # Sanity check: public parts of both key dicts must agree.
                assert (
                    private_key_vector['public_exponent'] ==
                    public_key_vector['public_exponent']
                )
                assert (
                    private_key_vector['modulus'] ==
                    public_key_vector['modulus']
                )
                vectors.append(
                    (private_key_vector, public_key_vector)
                )
            public_key_vector = collections.defaultdict(list)
            private_key_vector = collections.defaultdict(list)
            key = None
            attr = None
        if private_key_vector is None or public_key_vector is None:
            continue
        # Comment headers select which key/attribute the next lines fill.
        if line.startswith("# Private key"):
            key = private_key_vector
        elif line.startswith("# Public key"):
            key = public_key_vector
        elif line.startswith("# Modulus:"):
            attr = "modulus"
        elif line.startswith("# Public exponent:"):
            attr = "public_exponent"
        elif line.startswith("# Exponent:"):
            if key is public_key_vector:
                attr = "public_exponent"
            else:
                assert key is private_key_vector
                attr = "private_exponent"
        elif line.startswith("# Prime 1:"):
            attr = "p"
        elif line.startswith("# Prime 2:"):
            attr = "q"
        elif line.startswith("# Prime exponent 1:"):
            attr = "dmp1"
        elif line.startswith("# Prime exponent 2:"):
            attr = "dmq1"
        elif line.startswith("# Coefficient:"):
            attr = "iqmp"
        elif line.startswith("#"):
            attr = None
        else:
            if key is not None and attr is not None:
                key[attr].append(line.strip())
    return vectors
def load_rsa_nist_vectors(vector_data):
    """Parse RSA vectors from NIST ``Key = Value`` files.

    Handles both verification files (``n``/``e`` before ``SHAAlg``) and
    generation files (``p``/``q`` present).  Each ``SHAAlg`` line starts a
    new test-case dict; later keys fill in the current case.
    """
    test_data = None
    p = None
    salt_length = None
    data = []
    for line in vector_data:
        line = line.strip()
        # Blank lines and section headers are ignored
        if not line or line.startswith("["):
            continue
        if line.startswith("# Salt len:"):
            # PSS salt length applies to all following cases.
            salt_length = int(line.split(":")[1].strip())
            continue
        elif line.startswith("#"):
            continue
        # Build our data using a simple Key = Value format
        name, value = [c.strip() for c in line.split("=")]
        if name == "n":
            n = int(value, 16)
        elif name == "e" and p is None:
            e = int(value, 16)
        elif name == "p":
            p = int(value, 16)
        elif name == "q":
            q = int(value, 16)
        elif name == "SHAAlg":
            # SHAAlg marks the start of a concrete test case.
            if p is None:
                test_data = {
                    "modulus": n,
                    "public_exponent": e,
                    "salt_length": salt_length,
                    "algorithm": value,
                    "fail": False
                }
            else:
                test_data = {
                    "modulus": n,
                    "p": p,
                    "q": q,
                    "algorithm": value
                }
                if salt_length is not None:
                    test_data["salt_length"] = salt_length
            data.append(test_data)
        elif name == "e" and p is not None:
            test_data["public_exponent"] = int(value, 16)
        elif name == "d":
            test_data["private_exponent"] = int(value, 16)
        elif name == "Result":
            test_data["fail"] = value.startswith("F")
        # For all other tokens we simply want the name, value stored in
        # the dictionary
        else:
            test_data[name.lower()] = value.encode("ascii")
    return data
def load_fips_dsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS DSA KeyPair vector files.

    Returns a list of dicts with keys ``p``, ``q``, ``g``, ``x``, ``y``.
    A second ``X`` line for the same domain parameters starts a new key
    pair that shares the previous (p, q, g).
    """
    vectors = []
    # When reading_key_data is True the loader keeps constructing
    # dictionaries.  It is switched off for the N=224 blocks because that
    # parameter size is unsupported.
    reading_key_data = True

    def _hex_value(entry):
        # Lines look like "P = <hex>"; parse the hex payload.
        return int(entry.split("=")[1], 16)

    for line in vector_data:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        if line.startswith("[mod = L=2048, N=224"):
            reading_key_data = False
            continue
        if line.startswith("[mod = L=2048, N=256"):
            reading_key_data = True
            continue
        if line.startswith("[mod = L=1024") or line.startswith("[mod = L=3072"):
            # Other parameter sizes neither enable nor disable reading.
            continue
        if not reading_key_data:
            continue
        if line.startswith("P"):
            # P opens a new key-pair dictionary.
            vectors.append({'p': _hex_value(line)})
        elif line.startswith("Q"):
            vectors[-1]['q'] = _hex_value(line)
        elif line.startswith("G"):
            vectors[-1]['g'] = _hex_value(line)
        elif line.startswith("X") and 'x' not in vectors[-1]:
            vectors[-1]['x'] = _hex_value(line)
        elif line.startswith("X") and 'x' in vectors[-1]:
            # A second X starts a fresh key pair over the same domain.
            vectors.append({'p': vectors[-1]['p'],
                            'q': vectors[-1]['q'],
                            'g': vectors[-1]['g'],
                            'x': _hex_value(line)
                            })
        elif line.startswith("Y"):
            vectors[-1]['y'] = _hex_value(line)
    return vectors
def load_fips_dsa_sig_vectors(vector_data):
    """
    Loads data out of the FIPS DSA SigVer vector files.

    Each [mod = ...] header fixes the digest for the vectors that follow;
    a ``P`` line opens a new vector and a second ``Msg`` line starts another
    vector sharing the same domain parameters.
    """
    vectors = []
    sha_regex = re.compile(
        r"\[mod = L=...., N=..., SHA-(?P<sha>1|224|256|384|512)\]"
    )
    # When reading_key_data is set to True it tells the loader to continue
    # constructing dictionaries. We set reading_key_data to False during the
    # blocks of the vectors of N=224 because we don't support it.
    reading_key_data = True
    for line in vector_data:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        sha_match = sha_regex.match(line)
        if sha_match:
            # Remember the digest for all vectors in this block.
            digest_algorithm = "SHA-{0}".format(sha_match.group("sha"))
        if line.startswith("[mod = L=2048, N=224"):
            reading_key_data = False
            continue
        elif line.startswith("[mod = L=2048, N=256"):
            reading_key_data = True
            continue
        if not reading_key_data or line.startswith("[mod"):
            continue
        name, value = [c.strip() for c in line.split("=")]
        if name == "P":
            # P opens a new vector; it inherits the current digest.
            vectors.append({'p': int(value, 16),
                            'digest_algorithm': digest_algorithm})
        elif name == "Q":
            vectors[-1]['q'] = int(value, 16)
        elif name == "G":
            vectors[-1]['g'] = int(value, 16)
        elif name == "Msg" and 'msg' not in vectors[-1]:
            hexmsg = value.strip().encode("ascii")
            vectors[-1]['msg'] = binascii.unhexlify(hexmsg)
        elif name == "Msg" and 'msg' in vectors[-1]:
            # A second Msg starts a new vector sharing the same domain.
            hexmsg = value.strip().encode("ascii")
            vectors.append({'p': vectors[-1]['p'],
                            'q': vectors[-1]['q'],
                            'g': vectors[-1]['g'],
                            'digest_algorithm':
                            vectors[-1]['digest_algorithm'],
                            'msg': binascii.unhexlify(hexmsg)})
        elif name == "X":
            vectors[-1]['x'] = int(value, 16)
        elif name == "Y":
            vectors[-1]['y'] = int(value, 16)
        elif name == "R":
            vectors[-1]['r'] = int(value, 16)
        elif name == "S":
            vectors[-1]['s'] = int(value, 16)
        elif name == "Result":
            # e.g. "P (pass)" or "F (2 - R changed)" -> keep just the P/F token.
            vectors[-1]['result'] = value.split("(")[0].strip()
    return vectors
# Map NIST curve labels used in the vector files to their SECG curve names.
# http://tools.ietf.org/html/rfc4492#appendix-A
_ECDSA_CURVE_NAMES = {
    "P-192": "secp192r1",
    "P-224": "secp224r1",
    "P-256": "secp256r1",
    "P-384": "secp384r1",
    "P-521": "secp521r1",

    "K-163": "sect163k1",
    "K-233": "sect233k1",
    "K-283": "sect283k1",
    "K-409": "sect409k1",
    "K-571": "sect571k1",

    "B-163": "sect163r2",
    "B-233": "sect233r1",
    "B-283": "sect283r1",
    "B-409": "sect409r1",
    "B-571": "sect571r1",
}
def load_fips_ecdsa_key_pair_vectors(vector_data):
    """Parse the FIPS ECDSA KeyPair vector files.

    Returns a list of dicts holding the curve name, private value ``d``
    and public point coordinates ``x``/``y``.
    """
    results = []
    current = None
    for raw in vector_data:
        entry = raw.strip()
        if not entry or entry.startswith("#"):
            continue
        if entry[1:-1] in _ECDSA_CURVE_NAMES:
            # A [P-256]-style header selects the curve for what follows.
            curve_name = _ECDSA_CURVE_NAMES[entry[1:-1]]
        elif entry.startswith("d = "):
            # Each d opens a new key pair; flush the previous one first.
            if current is not None:
                results.append(current)
            current = {
                "curve": curve_name,
                "d": int(entry.split("=")[1], 16)
            }
        elif current is not None:
            if entry.startswith("Qx = "):
                current["x"] = int(entry.split("=")[1], 16)
            elif entry.startswith("Qy = "):
                current["y"] = int(entry.split("=")[1], 16)
    # Emit the final in-flight key pair.
    if current is not None:
        results.append(current)
    return results
def load_fips_ecdsa_signing_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA SigGen vector files.

    A [curve,SHA-n] header fixes the curve and digest for what follows;
    each ``Msg`` line opens a new vector dict.
    """
    vectors = []
    curve_rx = re.compile(
        r"\[(?P<curve>[PKB]-[0-9]{3}),SHA-(?P<sha>1|224|256|384|512)\]"
    )
    data = None
    for line in vector_data:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        curve_match = curve_rx.match(line)
        if curve_match:
            # Header selects curve and digest for the following vectors.
            curve_name = _ECDSA_CURVE_NAMES[curve_match.group("curve")]
            digest_name = "SHA-{0}".format(curve_match.group("sha"))
        elif line.startswith("Msg = "):
            # Msg opens a new vector; flush the previous one.
            if data is not None:
                vectors.append(data)
            hexmsg = line.split("=")[1].strip().encode("ascii")
            data = {
                "curve": curve_name,
                "digest_algorithm": digest_name,
                "message": binascii.unhexlify(hexmsg)
            }
        elif data is not None:
            if line.startswith("Qx = "):
                data["x"] = int(line.split("=")[1], 16)
            elif line.startswith("Qy = "):
                data["y"] = int(line.split("=")[1], 16)
            elif line.startswith("R = "):
                data["r"] = int(line.split("=")[1], 16)
            elif line.startswith("S = "):
                data["s"] = int(line.split("=")[1], 16)
            elif line.startswith("d = "):
                data["d"] = int(line.split("=")[1], 16)
            elif line.startswith("Result = "):
                # Result tokens look like "F (reason)" or "P (pass)".
                data["fail"] = line.split("=")[1].strip()[0] == "F"
    # Emit the final in-flight vector.
    if data is not None:
        vectors.append(data)
    return vectors
def load_kasvs_dh_vectors(vector_data):
    """
    Loads data out of the KASVS key exchange vector data

    A ``Result`` line completes one vector; error codes 5 and 10 mean the
    shared value Z was wrong (fail_z), every other F code means the key
    agreement itself should fail (fail_agree).
    """
    result_rx = re.compile(r"([FP]) \(([0-9]+) -")
    vectors = []
    data = {
        "fail_z": False,
        "fail_agree": False
    }
    for line in vector_data:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        if line.startswith("P = "):
            data["p"] = int(line.split("=")[1], 16)
        elif line.startswith("Q = "):
            data["q"] = int(line.split("=")[1], 16)
        elif line.startswith("G = "):
            data["g"] = int(line.split("=")[1], 16)
        elif line.startswith("Z = "):
            z_hex = line.split("=")[1].strip().encode("ascii")
            data["z"] = binascii.unhexlify(z_hex)
        elif line.startswith("XstatCAVS = "):
            data["x1"] = int(line.split("=")[1], 16)
        elif line.startswith("YstatCAVS = "):
            data["y1"] = int(line.split("=")[1], 16)
        elif line.startswith("XstatIUT = "):
            data["x2"] = int(line.split("=")[1], 16)
        elif line.startswith("YstatIUT = "):
            data["y2"] = int(line.split("=")[1], 16)
        elif line.startswith("Result = "):
            # Result completes the vector; classify the failure mode.
            result_str = line.split("=")[1].strip()
            match = result_rx.match(result_str)
            if match.group(1) == "F":
                if int(match.group(2)) in (5, 10):
                    data["fail_z"] = True
                else:
                    data["fail_agree"] = True
            vectors.append(data)
            # Next vector reuses the same domain parameters (p, q, g).
            data = {
                "p": data["p"],
                "q": data["q"],
                "g": data["g"],
                "fail_z": False,
                "fail_agree": False
            }
    return vectors
| viraptor/cryptography | tests/utils.py | Python | apache-2.0 | 21,888 |
# -*- encoding: utf-8 -*-
import random
# This file contains the definitions of the Snake and Field classes plus a
# few helper constants and functions.
# The playing field is a grid of squares (blocks).
WIDTH = 50    # field width (number of blocks)
HEIGHT = 30   # field height
BLOCK = 20    # size of one block in screen pixels

# Helper functions

def brick(canvas, x, y):
    """Create the canvas rectangle for one wall brick at grid cell (x, y)."""
    left, top = x * BLOCK, y * BLOCK
    return canvas.create_rectangle(left, top, left + BLOCK, top + BLOCK,
                                   fill='brown', width=2)

def mouse(canvas, x, y):
    """Create the canvas oval that draws a mouse at grid cell (x, y)."""
    left, top = x * BLOCK, y * BLOCK
    return canvas.create_oval(left + 2, top + 2, left + BLOCK - 2, top + BLOCK - 2,
                              fill='gray')
# Classes
class Snake():
    """A snake on the playing field.

    Every snake in the game is a subclass of this class.  A Snake object
    has the following attributes:

    field      -- the Field object the snake lives in
    (dx, dy)   -- movement direction, one of (-1,0), (1,0), (0,-1), (0,1)
    grow       -- how many segments the snake still has to grow
    color_head -- head colour
    color_tail -- tail colour
    coords     -- list of segment coordinates (the head is coords[0])
    cells      -- list of the canvas items that draw the snake
    """

    def __init__(self, field, color_head, color_tail, x, y, dx, dy):
        self.field = field
        self.dx = dx
        self.dy = dy
        self.grow = 0
        self.color_head = color_head
        self.color_tail = color_tail
        self.coords = []
        self.cells = []
        # the tail: two segments trailing behind the head
        for k in range(2, 0, -1):
            self.add_cell(x - k * self.dx, y - k * self.dy)
        self.add_cell(x, y) # the head

    def add_cell(self, x, y):
        """Add a new segment to the front of the snake."""
        cell = self.field.canvas.create_oval(
            x*BLOCK, y*BLOCK, (x+1)*BLOCK, (y+1)*BLOCK, fill = self.color_head)
        if len(self.cells) > 0:
            # The previous head is recoloured as part of the tail.
            self.field.canvas.itemconfigure(self.cells[0], fill=self.color_tail)
        self.coords.insert(0, (x, y))
        self.cells.insert(0, cell)

    def turn_left(self):
        """Turn the snake to the left."""
        (self.dx, self.dy) = (-self.dy, self.dx)

    def turn_right(self):
        """Turn the snake to the right."""
        (self.dx, self.dy) = (self.dy, -self.dx)

    def move(self):
        """Move the snake one step in the direction it is facing.

        If the target square holds a mouse, the mouse is eaten.  If the
        square is occupied by another snake or a brick, nothing happens."""
        (x,y) = self.coords[0]
        x += self.dx
        y += self.dy
        if self.field.is_mouse(x,y):
            self.grow = 1
            self.field.remove_mouse(x,y)
        if self.field.is_empty(x,y):
            if self.grow > 0:
                # Growing: add a head segment without shortening the tail.
                self.grow -= 1
                self.add_cell(x, y)
            else:
                # Reuse the last one
                self.coords.pop()
                self.coords.insert(0, (x,y))
                self.field.canvas.itemconfigure(self.cells[0], fill=self.color_tail)
                cell = self.cells.pop()
                self.field.canvas.coords(cell, x*BLOCK, y*BLOCK, (x+1)*BLOCK, (y+1)*BLOCK)
                self.field.canvas.itemconfigure(cell, fill=self.color_head)
                self.cells.insert(0, cell)

    def turn(self):
        """Turn the snake if needed.

        This method does nothing and must be redefined in the subclass that
        implements a concrete snake; see the provided examples."""
        pass
class Field():
    """The playing field the snakes move around in.

    Attributes:
    width  -- field width (in blocks)
    height -- field height (in blocks)
    snakes -- list of the snakes on the field
    mice   -- dict mapping mouse coordinates to their canvas item ids
    """

    def __init__(self, canvas, width, height):
        self.width = width
        self.height = height
        self.canvas = canvas
        self.snakes = []
        self.mice = {}
        self.bricks = []
        # The bricks: a wall around the whole border of the field.
        for i in range(width):
            self.bricks.append(brick(canvas, i, 0))
            self.bricks.append(brick(canvas, i, height-1))
        for j in range(1, height-1):
            self.bricks.append(brick(canvas, 0, j))
            self.bricks.append(brick(canvas, width-1, j))

    def add_snake(self, s):
        """Add a new snake to the field."""
        s.id = len(self.snakes)
        self.snakes.append(s)

    def is_mouse(self, x, y):
        """Is there a mouse at (x, y)?"""
        return (0 < x < self.width-1 and
                0 < y < self.height-1 and
                (x,y) in self.mice)

    def is_empty(self, x, y):
        """Is the square (x, y) empty (inside the walls, no mouse, no snake)?"""
        if (0 < x < self.width-1 and
            0 < y < self.height-1 and
            (x,y) not in self.mice):
            for s in self.snakes:
                if (x,y) in s.coords: return False
            return True
        else:
            return False

    def find_empty(self):
        """Pick an empty square at random, trying at most five times."""
        for i in range(5):
            x = random.randint(1, self.width-2)
            y = random.randint(1, self.height-2)
            if self.is_empty(x, y):
                return (x,y)
        return (None, None)

    def new_mouse(self):
        """Place a mouse on a randomly chosen empty square (if one is found)."""
        (x,y) = self.find_empty()
        if x and y:
            self.mice[(x,y)] = mouse(self.canvas, x, y)

    def remove_mouse(self, x, y):
        """Remove the mouse at (x, y), if there is one."""
        m = self.mice.get((x,y))
        if m:
            self.canvas.delete(m)
            del self.mice[(x,y)]
| andrejbauer/snakes | snake.py | Python | mit | 5,882 |
"""
Module containing the Independent class to handle all operations pertaining
to the independent model.
"""
import os
import pandas as pd
class Independent:
    """Returns an Independent object that reads in the data, splits into sets,
    trains and classifies, and writes the results."""

    def __init__(self, config_obj, classification_obj, util_obj):
        """Initializes object dependencies for this class."""
        self.config_obj = config_obj
        """Configuration object with user settings."""
        self.classification_obj = classification_obj
        """Object that handles classification of the data."""
        self.util_obj = util_obj
        """Class containing general utility methods."""

    # public
    def main(self):
        """Main method that reads in the comments, splits them into train and
        test, writes them to files, and prints out stats.
        Returns the train and test comment dataframes."""
        modified = self.config_obj.modified
        self.util_obj.start()
        data_f, fold_f, status_f = self.file_folders()
        sw = self.open_status_writer(status_f)
        coms_filename = self.util_obj.get_comments_filename(modified)
        coms_df = self.read_file(data_f + coms_filename, sw)
        train_df, val_df, test_df = self.split_coms(coms_df)
        if self.config_obj.alter_user_ids:
            self.alter_user_ids(coms_df, test_df)
        self.write_folds(val_df, test_df, fold_f)
        self.print_subsets(train_df, val_df, test_df, fw=sw)
        # Validation pass: train on the training split only.
        self.util_obj.start('\nvalidation set:\n', fw=sw)
        self.classification_obj.main(train_df, val_df, dset='val', fw=sw)
        self.util_obj.end('time: ', fw=sw)
        # Test pass: optionally fold the validation data into training.
        self.util_obj.start('\ntest set:\n', fw=sw)
        all_train_df = train_df.copy()
        if self.config_obj.super_train:
            all_train_df = pd.concat([train_df, val_df])
        self.classification_obj.main(all_train_df, test_df, dset='test', fw=sw)
        self.util_obj.end('time: ', fw=sw)
        self.util_obj.end('total independent model time: ', fw=sw)
        self.util_obj.close_writer(sw)
        return val_df, test_df

    # private
    def file_folders(self):
        """Returns absolute paths for various directories."""
        ind_dir = self.config_obj.ind_dir
        domain = self.config_obj.domain
        data_f = ind_dir + 'data/' + domain + '/'
        fold_f = ind_dir + 'data/' + domain + '/folds/'
        status_f = ind_dir + 'output/' + domain + '/status/'
        # Create the output folders on first use.
        if not os.path.exists(fold_f):
            os.makedirs(fold_f)
        if not os.path.exists(status_f):
            os.makedirs(status_f)
        return data_f, fold_f, status_f

    def open_status_writer(self, status_f):
        """Opens a file to write updates of the independent model.
        status_f: status folder.
        Returns file object to write to."""
        fold = self.config_obj.fold
        fname = status_f + 'ind_' + fold + '.txt'
        f = self.util_obj.open_writer(fname)
        return f

    def read_file(self, filename, fw=None):
        """Reads the appropriate comments file of the domain.
        filename: csv comments file.
        Returns comments dataframe up to the end marker in the config."""
        self.util_obj.start('loading data...', fw=fw)
        coms_df = pd.read_csv(filename, lineterminator='\n',
                              nrows=self.config_obj.end)
        self.util_obj.end(fw=fw)
        return coms_df

    def split_coms(self, coms_df):
        """Splits the comments into training, validation, and test sets.
        coms_df: comments dataframe.
        Returns train, val, and test dataframes."""
        start = self.config_obj.start
        train_size = self.config_obj.train_size
        val_size = self.config_obj.val_size
        coms_df = coms_df[start:]
        num_coms = len(coms_df)
        # Contiguous split in row order: [train | val | test].
        split_ndx1 = int(num_coms * train_size)
        split_ndx2 = split_ndx1 + int(num_coms * val_size)
        train_df = coms_df[:split_ndx1]
        val_df = coms_df[split_ndx1:split_ndx2]
        test_df = coms_df[split_ndx2:]
        return train_df, val_df, test_df

    def alter_user_ids(self, coms_df, test_df):
        """Alters the user ids in the test set so that all spam messages
        are posted by a different user.
        test_df: test set dataframe.
        Returns altered test set with different user ids for each spammer."""
        # Start above every existing id so synthetic ids never collide.
        max_user_id = coms_df['user_id'].max() + 1
        user_ids = list(zip(test_df['label'], test_df['user_id']))
        new_user_ids = []
        for label, user_id in user_ids:
            new_user_ids.append(max_user_id if label == 1 else user_id)
            max_user_id += 1
        # NOTE(review): direct assignment to a slice-derived frame may warn
        # (SettingWithCopy) — confirm test_df is intended to be mutated.
        test_df['user_id'] = new_user_ids

    def write_folds(self, val_df, test_df, fold_f):
        """Writes validation and test set dataframes to csv files.
        val_df: dataframe with validation set comments.
        test_df: dataframe with test set comments.
        fold_f: folder to save the data to."""
        fold = self.config_obj.fold
        val_fname = fold_f + 'val_' + fold + '.csv'
        test_fname = fold_f + 'test_' + fold + '.csv'
        val_df.to_csv(val_fname, line_terminator='\n', index=None)
        test_df.to_csv(test_fname, line_terminator='\n', index=None)

    def print_subsets(self, train_df, val_df, test_df, fw=None):
        """Writes basic statistics about the training and test sets.
        train_df: training set comments.
        test_df: test set comments."""
        spam, total = len(train_df[train_df['label'] == 1]), len(train_df)
        percentage = round(self.util_obj.div0(spam, total) * 100, 1)
        s = '\ttraining set size: ' + str(len(train_df)) + ', '
        s += 'spam: ' + str(spam) + ' (' + str(percentage) + '%)'
        self.util_obj.write(s, fw=fw)
        spam, total = len(val_df[val_df['label'] == 1]), len(val_df)
        percentage = round(self.util_obj.div0(spam, total) * 100, 1)
        s = '\tvalidation set size: ' + str(len(val_df)) + ', '
        s += 'spam: ' + str(spam) + ' (' + str(percentage) + '%)'
        self.util_obj.write(s, fw=fw)
        spam, total = len(test_df[test_df['label'] == 1]), len(test_df)
        percentage = round(self.util_obj.div0(spam, total) * 100, 1)
        s = '\ttest set size: ' + str(len(test_df)) + ', '
        s += 'spam: ' + str(spam) + ' (' + str(percentage) + '%)'
        self.util_obj.write(s, fw=fw)
| jjbrophy47/sn_spam | independent/scripts/independent.py | Python | mit | 6,452 |
def draw_box(ax, p0, p1, color='w'):
    """Draw the 12 edges of the axis-aligned box with corners *p0* and *p1*.

    *ax* is a 3-D matplotlib axes (only its ``plot`` method is used);
    *p0* and *p1* are (x, y, z) corner coordinates.
    """
    # Four edges of the bottom and the top rectangle (constant-y planes).
    for y in (p0[1], p1[1]):
        ax.plot([p0[0], p1[0]], [y, y], [p0[2], p0[2]], color)
        ax.plot([p1[0], p1[0]], [y, y], [p0[2], p1[2]], color)
        ax.plot([p1[0], p0[0]], [y, y], [p1[2], p1[2]], color)
        ax.plot([p0[0], p0[0]], [y, y], [p1[2], p0[2]], color)
    # Four vertical edges connecting the two rectangles.
    for x, z in ((p0[0], p0[2]), (p1[0], p0[2]), (p0[0], p1[2]), (p1[0], p1[2])):
        ax.plot([x, x], [p1[1], p0[1]], [z, z], color)
def draw_particle(ax,x,y,z):
    """Plot a single particle at (x, y, z) on the 3-D axes *ax*."""
    ax.scatter(x,y,z)
| sgh1/hash3 | example/draw_hash.py | Python | gpl-3.0 | 879 |
#!/usr/bin/env python
#
# fizzbuzz1.py - standard interview question solution from
# "Coding Horror" - See
# http://blog.codinghorror.com/why-cant-programmers-program/
#
# Copyright (C) 2018 Michael Davies <michael@the-davies.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Or try here: http://www.fsf.org/copyleft/gpl.html
#
from __future__ import print_function
import sys
def fizzbuzz(start, end):
    """Print the FizzBuzz sequence for the inclusive range [start, end].

    Multiples of 3 print "Fizz", multiples of 5 print "Buzz", multiples of
    both print "FizzBuzz", and every other number prints itself.
    """
    for n in range(start, end + 1):
        # String multiplication by a bool yields "" when the test fails.
        words = "Fizz" * (n % 3 == 0) + "Buzz" * (n % 5 == 0)
        print(words if words else n)
if __name__ == '__main__':
    # Optional CLI: fizzbuzz1.py [start end]; defaults to the classic 1..100.
    if len(sys.argv) == 3:
        start = int(sys.argv[1])
        end = int(sys.argv[2])
    else:
        start = 1
        end = 100
    fizzbuzz(start, end)
| mrda/junkcode | fizzbuzz1.py | Python | gpl-2.0 | 1,564 |
__author__ = 'jaisaacs'
###
# Serial interface for control of the Newport Vertical Stage
#
# Joshua A Isaacs 2016/11/3
#
#
###
import serial
import serial.tools.list_ports
import logging
logger = logging.getLogger(__name__)
class Newport():
    """Serial interface to a Newport motion-controller stage.

    On construction it tries the given COM port first and, if that port
    cannot be opened or does not respond like the controller, scans every
    other serial port on the machine until one passes ``test_port``.
    """
    #ser_add = '' #'COM6'# Address of serial controller for stage
    #motion_dict = dict(home = 'XH',pos = 'XR', moveAbs = 'XG ', setVel = 'XV ')

    # Software travel limits in controller units.
    # NOTE(review): minpos/maxpos are not enforced anywhere in this class —
    # confirm whether callers are expected to range-check against them.
    minpos = -13700
    maxpos = 13700

    def __init__(self,comport,axis='X'):
        self.axis = 'X'
        self.setaxis(axis)
        ports = serial.tools.list_ports.comports()
        # print ports
        # Try comport first
        self.comport_bad = False
        self.ser_add = comport
        try:
            self.ser = serial.Serial(self.ser_add)
        except serial.SerialException as e:
            logger.exception(e)
            self.comport_bad = True
        if not self.comport_bad:
            if not self.ser.isOpen():
                try:
                    self.ser.open()
                except Exception as e:
                    logger.exception(e)
            self.ser.timeout = 1
            self.ser.xonxoff = True
            # COMOPT3 configures the controller's serial chatter mode.
            self.WriteThenPrint('COMOPT3')
            self.comport_bad = not self.test_port()
        if self.comport_bad:
            # Fall back: probe every other serial port on the machine.
            for port in ports:
                #print port[0]
                if port[0] == comport:
                    continue
                try:
                    self.ser = serial.Serial(port[0])
                    logger.info("Opened Port {}".format(port[0]))
                except serial.SerialException as e:
                    logger.exception(e)
                    continue
                if not self.ser.isOpen():
                    try:
                        self.ser.open()
                        logger.info("{} is not open".format(comport))
                    except Exception as e:
                        logger.exception(e)
                self.ser.timeout = 1
                self.ser.xonxoff =True
                self.WriteThenPrint('COMOPT3')
                if self.test_port():
                    logger.info("Port {} is initialized, Axis = {}".format(port[0], self.axis))
                    break
                else:
                    self.ser.close()
                    self.ser = None

    def WriteThenPrint(self,s):
        """Send command *s* to the controller and log each response line."""
        self.ser.write((s+'\n\r').encode('utf-8'))
        response = self.ser.readlines()
        for i in response:
            logger.info(i.rstrip())

    def WriteThenStore(self,s):
        """Send command *s* and return the raw list of response lines."""
        self.ser.write((s+'\n\r').encode('utf-8'))
        response = self.ser.readlines()
        logger.info(response)
        return response

    def home(self):
        """Send the stage to its home position."""
        self.WriteThenStore(self.axis+'H')

    def setVelocity(self,vel):
        """Set the stage velocity to *vel* (controller units)."""
        self.WriteThenStore(self.axis+'V {}'.format(vel))

    def moveAbs(self,pos):
        """Command an absolute move to *pos* without waiting for completion."""
        self.WriteThenStore(self.axis+'G {}'.format(pos))

    def moveAbsCheck(self,pos):
        '''
        Moves stage to position "pos" and acknowledges arrival with message
        :param pos:
        :return:
        '''
        output = self.WriteThenStore(self.axis+'G {}'.format(pos))
        done = ''
        # Poll the controller until it reports "<axis>D" (done).
        while done != self.axis+'D':
            done = self.status()
            logger.info('Status: {}\n'.format(done))
        # NOTE(review): message says "Calibration" but this is a move —
        # likely copied from calibrateStage; confirm wording.
        logger.info('Calibration: Complete!')

    def status(self):
        """Return the last two characters of the controller STAT reply."""
        return self.WriteThenStore(self.axis+'STAT')[0].rstrip()[-2:]

    def whereAmI(self):
        """Query the current stage position and return it as a float."""
        output = self.WriteThenStore(self.axis+'R')[1].rstrip()[2:]
        # Retry until the controller returns a non-empty position string.
        while output == '':
            output = self.WriteThenStore(self.axis+'R')[1].rstrip()[2:]
        return float(output)

    def findCenter(self,side=-1):
        """Run the controller's find-center routine and wait until done."""
        self.WriteThenStore(self.axis+'F {}'.format(side))
        done = ''
        while done != self.axis+'D':
            done = self.status()
            logger.info('Status: {}\n'.format(done))
        logger.info('Center: Found!')

    def calibrateStage(self):
        """Run the controller's auto-zero calibration and wait until done."""
        self.WriteThenStore(self.axis+'AZ')
        done = ''
        while done != self.axis+'D':
            done = self.status()
            logger.info('Status: {}\n'.format(done))
        logger.info('Calibration: Complete!')

    def setaxis(self,axis):
        """Select the controller axis; invalid values fall back to X."""
        if axis in ['X','Y','Z']:
            self.axis = axis
            #print 'Axis is {}'.format(self.axis)
        else:
            logger.warning("Invalid axis parameter passed to "
                           "NewportMotionController class. "
                           "Valid values are X, Y, Z. Defaulting to X.")
            self.axis='X'

    def test_port(self):
        '''
        Tests the current COM port to make sure correct device is being addressed. Currently a hacky workaround.
        :return: Good port: Boolean, Is the port the correct port?
        '''
        try:
            self.whereAmI()
        except IndexError:
            logger.info("There was an index Error. Probably wrong COM port")
            return False
        logger.info("No Errors, probably the right port")
        return True
| QuantumQuadrate/CsPyController | python/NewportStageController.py | Python | lgpl-3.0 | 5,025 |
#
# Copyright 2005,2006,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
PSK modulation and demodulation.
"""
from math import pi, log
from cmath import exp
import digital_swig
import modulation_utils
from utils import mod_codes, gray_code
from generic_mod_demod import generic_mod, generic_demod
from generic_mod_demod import shared_mod_args, shared_demod_args
# Default number of points in constellation.
_def_constellation_points = 4
# The default encoding (e.g. gray-code, set-partition)
_def_mod_code = mod_codes.GRAY_CODE
# Default use of differential encoding
_def_differential = True
def create_encodings(mod_code, arity, differential):
    """Return the (pre_diff_code, post_diff_code) pair for a symbol coding.

    With gray coding, the code is applied before the differential encoder
    when differential encoding is enabled and after it otherwise.
    """
    if mod_code not in mod_codes.codes:
        raise ValueError('That modulation code does not exist.')
    if mod_code == mod_codes.GRAY_CODE:
        code = gray_code.gray_code(arity)
        if differential:
            return (code, None)
        return ([], code)
    if mod_code == mod_codes.NO_CODE:
        return ([], None)
    raise ValueError('That modulation code is not implemented for this constellation.')
# /////////////////////////////////////////////////////////////////////////////
# PSK constellation
# /////////////////////////////////////////////////////////////////////////////
def psk_constellation(m=_def_constellation_points, mod_code=_def_mod_code,
                      differential=_def_differential):
    """
    Creates a PSK constellation object.

    Args:
        m: number of constellation points (must be a power of two).
        mod_code: symbol coding (e.g. mod_codes.GRAY_CODE).
        differential: whether differential encoding is used.
    """
    k = log(m) / log(2.0)
    if (k != int(k)):
        # ValueError is a StandardError subclass on python 2, so existing
        # callers keep working, and unlike StandardError it also exists on
        # python 3.
        raise ValueError('Number of constellation points must be a power of two.')
    # Evenly spaced points on the unit circle.
    points = [exp(2*pi*(0+1j)*i/m) for i in range(0,m)]
    pre_diff_code, post_diff_code = create_encodings(mod_code, m, differential)
    if post_diff_code is not None:
        # Bake the post-differential code into the constellation ordering.
        inverse_post_diff_code = mod_codes.invert_code(post_diff_code)
        points = [points[x] for x in inverse_post_diff_code]
    constellation = digital_swig.constellation_psk(points, pre_diff_code, m)
    return constellation
# /////////////////////////////////////////////////////////////////////////////
# PSK modulator
# /////////////////////////////////////////////////////////////////////////////
class psk_mod(generic_mod):
    """
    Hierarchical block for RRC-filtered PSK modulation.

    The input is a byte stream (unsigned char) and the
    output is the complex modulated signal at baseband.

    Args:
        constellation_points: Number of constellation points (must be a power of two) (integer).
        mod_code: Whether to use a gray_code (digital.mod_codes.GRAY_CODE) or not (digital.mod_codes.NO_CODE).
        differential: Whether to use differential encoding (boolean).
    """
    # See generic_mod for additional arguments
    __doc__ += shared_mod_args

    def __init__(self, constellation_points=_def_constellation_points,
                 mod_code=_def_mod_code,
                 differential=_def_differential,
                 *args, **kwargs):
        # Build the constellation once; generic_mod drives the actual
        # modulation chain from it.
        constellation = psk_constellation(constellation_points, mod_code, differential)
        super(psk_mod, self).__init__(constellation, differential, *args, **kwargs)
# /////////////////////////////////////////////////////////////////////////////
# PSK demodulator
#
# /////////////////////////////////////////////////////////////////////////////
class psk_demod(generic_demod):
    """
    Hierarchical block for RRC-filtered PSK demodulation.

    The input is the complex modulated signal at baseband and the
    output is a stream of demodulated bits (unsigned char).

    Args:
        constellation_points: Number of constellation points (must be a power of two) (integer).
        mod_code: Whether to use a gray_code (digital.mod_codes.GRAY_CODE) or not (digital.mod_codes.NO_CODE).
        differential: Whether to use differential encoding (boolean).
    """
    # See generic_demod for additional arguments.
    # Fixed: previously appended shared_mod_args (copy-paste from psk_mod);
    # the demodulator documents the demod argument set.
    __doc__ += shared_demod_args

    def __init__(self, constellation_points=_def_constellation_points,
                 mod_code=_def_mod_code,
                 differential=_def_differential,
                 *args, **kwargs):
        # Build the constellation once; generic_demod drives the actual
        # demodulation chain from it.
        constellation = psk_constellation(constellation_points, mod_code, differential)
        super(psk_demod, self).__init__(constellation, differential, *args, **kwargs)
#
# Add these to the mod/demod registry
#
# Registering by name lets modulation_utils look up the PSK blocks and
# constellation factory at runtime.
modulation_utils.add_type_1_mod('psk', psk_mod)
modulation_utils.add_type_1_demod('psk', psk_demod)
modulation_utils.add_type_1_constellation('psk', psk_constellation)
| Gabotero/GNURadioNext | gr-digital/python/psk.py | Python | gpl-3.0 | 5,480 |
from __future__ import unicode_literals
import importlib
import inspect
from django.apps import AppConfig
from django.conf import settings
from .manager import manager
class DjangoSchedulerManagerConfig(AppConfig):
    """App config that auto-discovers ``jobs`` modules and registers jobs."""

    name = 'django_schedulermanager'

    def ready(self):
        """Scan each installed app for a ``jobs`` module and register every
        schedulable member with the global scheduler manager."""
        for app_name in settings.INSTALLED_APPS:
            try:
                module = importlib.import_module('{}.jobs'.format(app_name))
            except ImportError:
                # The app has no importable jobs module; skip it.
                continue
            for _name, member in inspect.getmembers(module, self.schedulable_only):
                scheduler = member.django_scheduler
                manager.add_job(scheduler.id, scheduler)

    def schedulable_only(self, member):
        """Predicate: True only for members flagged as schedulable jobs."""
        is_job = hasattr(member, 'django_scheduler')
        return is_job and member.django_scheduler.is_schedulable
| marcoacierno/django-schedulermanager | django_schedulermanager/apps.py | Python | mit | 927 |
import sys
from tempfile import NamedTemporaryFile, TemporaryFile, mktemp
import os
from numpy import memmap
from numpy import arange, allclose, asarray
from numpy.testing import *
class TestMemmap(TestCase):
    """Tests for numpy.memmap: round-tripping, attributes, views and
    reference handling of memory-mapped arrays."""

    def setUp(self):
        # Each test gets a fresh named temporary file to back its memmap.
        self.tmpfp = NamedTemporaryFile(prefix='mmap')
        self.shape = (3,4)
        self.dtype = 'float32'
        self.data = arange(12, dtype=self.dtype)
        self.data.resize(self.shape)

    def tearDown(self):
        self.tmpfp.close()

    def test_roundtrip(self):
        # Write data to file
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        del fp  # Test __del__ machinery, which handles cleanup

        # Read data back from file
        newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
                       shape=self.shape)
        assert_(allclose(self.data, newfp))
        assert_array_equal(self.data, newfp)

    def test_open_with_filename(self):
        # memmap accepts a path string as well as a file object.
        tmpname = mktemp('','mmap')
        fp = memmap(tmpname, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        del fp
        os.unlink(tmpname)

    def test_unnamed_file(self):
        # memmap works with anonymous (unnamed) temporary files too.
        f = TemporaryFile()
        fp = memmap(f, dtype=self.dtype, shape=self.shape)
        del fp
        f.close()

    def test_attributes(self):
        # offset and mode passed at construction are exposed as attributes.
        offset = 1
        mode = "w+"
        fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
                    shape=self.shape, offset=offset)
        self.assertEqual(offset, fp.offset)
        self.assertEqual(mode, fp.mode)
        del fp

    def test_filename(self):
        # The filename attribute is absolute and inherited by views.
        tmpname = mktemp('','mmap')
        fp = memmap(tmpname, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        abspath = os.path.abspath(tmpname)
        fp[:] = self.data[:]
        self.assertEqual(abspath, fp.filename)
        b = fp[:1]
        self.assertEqual(abspath, b.filename)
        del b
        del fp
        os.unlink(tmpname)

    def test_filename_fileobj(self):
        # When backed by a file object, filename reflects the object's name.
        fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
                    shape=self.shape)
        self.assertEqual(fp.filename, self.tmpfp.name)

    @dec.knownfailureif(sys.platform=='gnu0', "This test is known to fail on hurd")
    def test_flush(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        assert_equal(fp[0], self.data[0])
        fp.flush()

    def test_del(self):
        # Make sure a view does not delete the underlying mmap
        fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                         shape=self.shape)
        fp_base[0] = 5
        fp_view = fp_base[0:1]
        assert_equal(fp_view[0], 5)
        del fp_view
        # Should still be able to access and assign values after
        # deleting the view
        assert_equal(fp_base[0], 5)
        fp_base[0] = 6
        assert_equal(fp_base[0], 6)

    def test_arithmetic_drops_references(self):
        # Results of arithmetic must not keep the source mmap alive.
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        tmp = (fp + 10)
        if isinstance(tmp, memmap):
            assert tmp._mmap is not fp._mmap

    def test_indexing_drops_references(self):
        # Fancy indexing copies, so it must not share the mmap either.
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        tmp = fp[[(1, 2), (2, 3)]]
        if isinstance(tmp, memmap):
            assert tmp._mmap is not fp._mmap

    def test_slicing_keeps_references(self):
        # Basic slicing returns a view that shares the underlying mmap.
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        assert fp[:2, :2]._mmap is fp._mmap

    def test_view(self):
        # Views (and asarray results) keep the memmap as their base.
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        new1 = fp.view()
        new2 = new1.view()
        assert(new1.base is fp)
        assert(new2.base is fp)
        new_array = asarray(fp)
        assert(new_array.base is fp)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    run_module_suite()
| mbalasso/mynumpy | numpy/core/tests/test_memmap.py | Python | bsd-3-clause | 4,069 |
from django import forms
from mc2.controllers.docker.models import DockerController
from mc2.controllers.base.forms import ControllerForm, ControllerFormHelper
class DockerControllerForm(ControllerForm):
    """Form for creating/editing a Docker-based controller.

    Adds the docker/Marathon-specific fields on top of the base
    ControllerForm; optional fields are rendered with an '(optional)'
    placeholder.
    """

    marathon_cmd = forms.CharField(
        required=False,
        widget=forms.Textarea(attrs={'class': 'form-control'}))
    marathon_args = forms.CharField(
        required=False,
        widget=forms.Textarea(attrs={'class': 'form-control'}))
    docker_image = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control'}))
    marathon_health_check_path = forms.CharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': '(optional)'}),
        required=False)
    marathon_health_check_cmd = forms.CharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': '(optional)'}),
        required=False)
    port = forms.CharField(
        widget=forms.NumberInput(attrs={
            'class': 'form-control',
            'placeholder': '(optional)'}),
        required=False)
    domain_urls = forms.CharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': '(optional)'}),
        required=False)
    external_visibility = forms.BooleanField(
        required=False,
        initial=False,
        label="Should the URL be exposed to the outside world?",
        help_text=(
            "Disabling this field will remove this app from our load balancer "
            "and make it only accessible from inside the cluster."),
        widget=forms.RadioSelect(choices=[(True, 'Yes'), (False, 'No')]))
    volume_needed = forms.BooleanField(
        required=False, label="Do you want storage?", initial=False,
        widget=forms.RadioSelect(choices=[(True, 'Yes'), (False, 'No')]))
    volume_path = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control'}),
        required=False)
    webhook_token = forms.UUIDField(
        required=False,
        widget=forms.TextInput(attrs={'class': 'form-control'}))

    def clean_port(self):
        # Normalize the optional port: an empty string becomes None so the
        # model stores NULL instead of ''.
        return self.cleaned_data['port'] or None

    class Meta:
        model = DockerController
        fields = (
            'name', 'marathon_cpus', 'marathon_mem', 'marathon_instances',
            'marathon_cmd', 'marathon_args', 'docker_image',
            'marathon_health_check_path',
            'port', 'domain_urls', 'volume_needed', 'volume_path',
            'webhook_token', 'description', 'organization',
            'postgres_db_needed', 'external_visibility',
            'rabbitmq_vhost_needed', 'rabbitmq_vhost_name',
            'marathon_health_check_cmd',)
class DockerControllerFormHelper(ControllerFormHelper):
    """Form helper that wires up the docker-specific controller form."""

    def __init__(self, data=None, files=None, instance=None,
                 prefix=None, initial=None):
        # Fixed: ``initial`` previously defaulted to a shared mutable dict
        # ({}), which every call would share; use None and create a fresh
        # dict per call instead.
        if initial is None:
            initial = {}
        super(DockerControllerFormHelper, self).__init__(
            data, files, instance, prefix, initial)
        self.controller_form = DockerControllerForm(
            data, files, instance=instance, initial=initial)
| praekelt/mc2 | mc2/controllers/docker/forms.py | Python | bsd-2-clause | 3,122 |
import RPi.GPIO as GPIO
import time

# BCM pin numbers: LED drive output and push-button input.
LED_PIN = 18
SWITCH_PIN = 23

GPIO.setmode(GPIO.BCM)
GPIO.setup(LED_PIN, GPIO.OUT)
# Button pulls the input low when pressed (internal pull-up enabled),
# so the input reads True while released and False while pressed.
GPIO.setup(SWITCH_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)

GPIO.output(LED_PIN, True)

try:
    while True:
        # Button pressed -> input reads False -> turn the LED off.
        # NOTE(review): the LED is never switched back on after a press;
        # confirm whether that one-shot behavior is intended.
        if not GPIO.input(SWITCH_PIN):
            GPIO.output(LED_PIN, False)
        time.sleep(0.2)
except KeyboardInterrupt:
    pass
finally:
    # Release the pins so the next program starts from a clean state.
    GPIO.cleanup()
| cfoale/ILOCI | Receiver-OLED/pi/projects/OLEDPython/led and button.py | Python | gpl-3.0 | 328 |
# -*- coding:utf-8 -*-
"""SQLite parser plugin for Twitter on iOS 8+ database files."""
from __future__ import unicode_literals
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class TwitterIOSContactEventData(events.EventData):
  """Twitter on iOS 8+ contact event data.

  Attributes:
    description (str): description of the profile.
    followers_count (int): number of accounts following the contact.
    following_count (int): number of accounts the contact is following.
    following (int): 1 if the contact is following the user's account, 0 if not.
    location (str): location of the profile.
    name (str): name of the profile.
    profile_url (str): URL of the profile picture.
    screen_name (str): screen name.
    url (str): URL of the profile.
  """

  DATA_TYPE = 'twitter:ios:contact'

  def __init__(self):
    """Initializes event data."""
    super(TwitterIOSContactEventData, self).__init__(data_type=self.DATA_TYPE)
    # All attributes default to None and are filled in by
    # TwitterIOSPlugin.ParseContactRow.
    self.description = None
    self.followers_count = None
    self.following = None
    self.following_count = None
    self.location = None
    self.name = None
    self.profile_url = None
    self.screen_name = None
    self.url = None
class TwitterIOSStatusEventData(events.EventData):
  """Parent class for Twitter on iOS 8+ status events.

  Attributes:
    favorite_count (int): number of times the status message has been favorited.
    favorited (int): value to mark status as favorite by the account.
    name (str): user's profile name.
    retweet_count (str): number of times the status message has been retweeted.
    text (str): content of the status message.
    user_id (int): user unique identifier.
  """

  DATA_TYPE = 'twitter:ios:status'

  def __init__(self):
    """Initializes event data."""
    super(TwitterIOSStatusEventData, self).__init__(data_type=self.DATA_TYPE)
    # All attributes default to None and are filled in by
    # TwitterIOSPlugin.ParseStatusRow.
    self.favorite_count = None
    self.favorited = None
    self.name = None
    self.retweet_count = None
    self.text = None
    self.user_id = None
class TwitterIOSPlugin(interface.SQLitePlugin):
  """SQLite parser plugin for Twitter on iOS 8+ database files.

  The Twitter on iOS 8+ database file is typically stored in:
  /private/var/mobile/Containers/Data/Application/Library/Caches/databases/
  twitter.db
  """

  NAME = 'twitter_ios'
  DATA_FORMAT = 'Twitter on iOS 8 and later SQLite database (twitter.db) file'

  # Tables and columns that must be present for this plugin to claim
  # the database.
  REQUIRED_STRUCTURE = {
      'Users': frozenset([
          'createdDate', 'updatedAt', 'screenName', 'name', 'profileImageUrl',
          'location', 'description', 'url', 'following', 'followersCount',
          'followingCount', 'id']),
      'Statuses': frozenset([
          'date', 'text', 'userId', 'retweetCount', 'favoriteCount',
          'favorited', 'updatedAt'])}

  # Each query is paired with the name of the row-callback method below.
  QUERIES = [
      (('SELECT createdDate, updatedAt, screenName, name, profileImageUrl,'
        'location, description, url, following, followersCount, followingCount'
        ' FROM Users ORDER BY createdDate'), 'ParseContactRow'),
      (('SELECT Statuses.date AS date, Statuses.text AS text, Statuses.userId '
        'AS user_id, Users.name AS name, Statuses.retweetCount AS '
        'retweetCount, Statuses.favoriteCount AS favoriteCount, '
        'Statuses.favorited AS favorited, Statuses.updatedAt AS updatedAt '
        'FROM Statuses LEFT join Users ON Statuses.userId = Users.id ORDER '
        'BY date'), 'ParseStatusRow')]

  # Known table schemas, used to match the database layout.
  SCHEMAS = [{
      'Lists': (
          'CREATE TABLE Lists ( \'id\' INTEGER PRIMARY KEY, \'name\' TEXT, '
          '\'slug\' TEXT, \'desc\' TEXT, \'private\' INTEGER, '
          '\'subscriberCount\' INTEGER, \'memberCount\' INTEGER, \'userId\' '
          'INTEGER, \'updatedAt\' REAL )'),
      'ListsShadow': (
          'CREATE TABLE ListsShadow ( \'id\' INTEGER PRIMARY KEY, \'name\' '
          'TEXT, \'slug\' TEXT, \'desc\' TEXT, \'private\' INTEGER, '
          '\'subscriberCount\' INTEGER, \'memberCount\' INTEGER, \'userId\' '
          'INTEGER, \'updatedAt\' REAL )'),
      'MyRetweets': (
          'CREATE TABLE MyRetweets ( \'statusId\' INTEGER PRIMARY KEY, '
          '\'myRetweetId\' INTEGER )'),
      'Statuses': (
          'CREATE TABLE Statuses ( \'id\' INTEGER PRIMARY KEY, \'text\' TEXT, '
          '\'date\' REAL, \'userId\' INTEGER, \'inReplyToStatusId\' INTEGER, '
          '\'retweetedStatusId\' INTEGER, \'geotag\' BLOB, \'entities\' BLOB, '
          '\'card\' BLOB, \'cardUsers\' BLOB, \'primaryCardType\' INTEGER, '
          '\'cardVersion\' INTEGER, \'retweetCount\' INTEGER, '
          '\'favoriteCount\' INTEGER, \'favorited\' INTEGER, \'updatedAt\' '
          'REAL, \'extraScribeItem\' BLOB, \'withheldScope\' TEXT, '
          '\'withheldInCountries\' TEXT, \'inReplyToUsername\' TEXT, '
          '\'possiblySensitive\' INTEGER, \'isPossiblySensitiveAppealable\' '
          'INTEGER, \'isLifelineAlert\' INTEGER, \'isTruncated\' INTEGER, '
          '\'previewLength\' INTEGER, \'fullTextLength\' INTEGER, \'lang\' '
          'TEXT, \'supplmentalLanguage\' TEXT, \'includeInProfileTimeline\' '
          'INTEGER, \'quotedStatusId\' INTEGER, \'source\' TEXT )'),
      'StatusesShadow': (
          'CREATE TABLE StatusesShadow ( \'id\' INTEGER PRIMARY KEY, \'text\' '
          'TEXT, \'date\' REAL, \'userId\' INTEGER, \'inReplyToStatusId\' '
          'INTEGER, \'retweetedStatusId\' INTEGER, \'geotag\' BLOB, '
          '\'entities\' BLOB, \'card\' BLOB, \'cardUsers\' BLOB, '
          '\'primaryCardType\' INTEGER, \'cardVersion\' INTEGER, '
          '\'retweetCount\' INTEGER, \'favoriteCount\' INTEGER, \'favorited\' '
          'INTEGER, \'updatedAt\' REAL, \'extraScribeItem\' BLOB, '
          '\'withheldScope\' TEXT, \'withheldInCountries\' TEXT, '
          '\'inReplyToUsername\' TEXT, \'possiblySensitive\' INTEGER, '
          '\'isPossiblySensitiveAppealable\' INTEGER, \'isLifelineAlert\' '
          'INTEGER, \'isTruncated\' INTEGER, \'previewLength\' INTEGER, '
          '\'fullTextLength\' INTEGER, \'lang\' TEXT, '
          '\'supplementalLanguage\' TEXT, \'includeInProfileTimeline\' '
          'INTEGER, \'quotedStatusId\' INTEGER, \'source\' TEXT )'),
      'Users': (
          'CREATE TABLE Users ( \'id\' INTEGER PRIMARY KEY, \'screenName\' '
          'TEXT COLLATE NOCASE, \'profileImageUrl\' TEXT, '
          '\'profileBannerUrl\' TEXT, \'profileLinkColorHexTriplet\' INTEGER, '
          '\'name\' TEXT, \'location\' TEXT, \'structuredLocation\' BLOB, '
          '\'description\' TEXT, \'url\' TEXT, \'urlEntities\' BLOB, '
          '\'bioEntities\' BLOB, \'protected\' INTEGER, \'verified\' INTEGER, '
          '\'following\' INTEGER, \'deviceFollowing\' INTEGER, '
          '\'advertiserAccountType\' INTEGER, \'statusesCount\' INTEGER, '
          '\'mediaCount\' INTEGER, \'favoritesCount\' INTEGER, '
          '\'followingCount\' INTEGER, \'followersCount\' INTEGER, '
          '\'followersCountFast\' INTEGER, \'followersCountNormal\' INTEGER, '
          '\'couldBeStale\' INTEGER, \'isLifelineInstitution\' INTEGER, '
          '\'hasCollections\' INTEGER, \'updatedAt\' REAL, \'createdDate\' '
          'REAL, \'isTranslator\' INTEGER, \'hasExtendedProfileFields\' '
          'INTEGER, \'extendedProfileFields\' BLOB, \'pinnedTweetId\' '
          'INTEGER, \'businessProfileState\' INTEGER, \'analyticsType\' '
          'INTEGER )'),
      'UsersShadow': (
          'CREATE TABLE UsersShadow ( \'id\' INTEGER PRIMARY KEY, '
          '\'screenName\' TEXT COLLATE NOCASE, \'profileImageUrl\' TEXT, '
          '\'profileBannerUrl\' TEXT, \'profileLinkColorHexTriplet\' INTEGER, '
          '\'name\' TEXT, \'location\' TEXT, \'structuredLocation\' BLOB, '
          '\'description\' TEXT, \'url\' TEXT, \'urlEntities\' BLOB, '
          '\'bioEntities\' BLOB, \'protected\' INTEGER, \'verified\' INTEGER, '
          '\'following\' INTEGER, \'deviceFollowing\' INTEGER, '
          '\'advertiserAccountType\' INTEGER, \'statusesCount\' INTEGER, '
          '\'mediaCount\' INTEGER, \'favoritesCount\' INTEGER, '
          '\'followingCount\' INTEGER, \'followersCount\' INTEGER, '
          '\'followersCountFast\' INTEGER, \'followersCountNormal\' INTEGER, '
          '\'couldBeStale\' INTEGER, \'isLifelineInstitution\' INTEGER, '
          '\'hasCollections\' INTEGER, \'updatedAt\' REAL, \'createdDate\' '
          'REAL, \'isTranslator\' INTEGER, \'hasExtendedProfileFields\' '
          'INTEGER, \'extendedProfileFields\' BLOB, \'pinnedTweetId\' '
          'INTEGER, \'businessProfileState\' INTEGER, \'analyticsType\' '
          'INTEGER )')}]

  def ParseContactRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a contact row from the database.

    Produces a creation event from ``createdDate`` and an update event
    from ``updatedAt`` sharing the same event data.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row resulting from query.
    """
    query_hash = hash(query)

    event_data = TwitterIOSContactEventData()
    event_data.description = self._GetRowValue(query_hash, row, 'description')
    event_data.followers_count = self._GetRowValue(
        query_hash, row, 'followersCount')
    event_data.following = self._GetRowValue(query_hash, row, 'following')
    event_data.following_count = self._GetRowValue(
        query_hash, row, 'followingCount')
    event_data.location = self._GetRowValue(query_hash, row, 'location')
    event_data.name = self._GetRowValue(query_hash, row, 'name')
    event_data.profile_url = self._GetRowValue(
        query_hash, row, 'profileImageUrl')
    event_data.query = query
    event_data.screen_name = self._GetRowValue(query_hash, row, 'screenName')
    event_data.url = self._GetRowValue(query_hash, row, 'url')

    timestamp = self._GetRowValue(query_hash, row, 'createdDate')
    if timestamp:
      # Convert the floating point value to an integer.
      timestamp = int(timestamp)
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_CREATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    timestamp = self._GetRowValue(query_hash, row, 'updatedAt')
    if timestamp:
      # Convert the floating point value to an integer.
      timestamp = int(timestamp)
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_UPDATE)
      parser_mediator.ProduceEventWithEventData(event, event_data)

  def ParseStatusRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a status row from the database.

    Produces a creation event from ``date`` and an update event from
    ``updatedAt`` sharing the same event data.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row resulting from query.
    """
    query_hash = hash(query)

    event_data = TwitterIOSStatusEventData()
    event_data.favorite_count = self._GetRowValue(
        query_hash, row, 'favoriteCount')
    event_data.favorited = self._GetRowValue(query_hash, row, 'favorited')
    event_data.name = self._GetRowValue(query_hash, row, 'name')
    event_data.query = query
    event_data.retweet_count = self._GetRowValue(
        query_hash, row, 'retweetCount')
    event_data.text = self._GetRowValue(query_hash, row, 'text')
    event_data.user_id = self._GetRowValue(query_hash, row, 'user_id')

    timestamp = self._GetRowValue(query_hash, row, 'date')
    if timestamp:
      # Convert the floating point value to an integer.
      timestamp = int(timestamp)
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_CREATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    timestamp = self._GetRowValue(query_hash, row, 'updatedAt')
    if timestamp:
      # Convert the floating point value to an integer.
      timestamp = int(timestamp)
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(
          date_time, definitions.TIME_DESCRIPTION_UPDATE)
      parser_mediator.ProduceEventWithEventData(event, event_data)
# Make the plugin available to the SQLite parser under its NAME.
sqlite.SQLiteParser.RegisterPlugin(TwitterIOSPlugin)
| rgayon/plaso | plaso/parsers/sqlite_plugins/twitter_ios.py | Python | apache-2.0 | 12,624 |
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from tornado import netutil
from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
from tornado import gen
from tornado.http1connection import HTTP1Connection
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado.netutil import ssl_options_to_context
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, asynchronous, stream_request_body
from contextlib import closing
import datetime
import gzip
import os
import shutil
import socket
import ssl
import sys
import tempfile
from io import BytesIO
def read_stream_body(stream, callback):
    """Reads an HTTP response from `stream` and runs callback with its
    start_line, headers and body."""
    chunks = []

    class Delegate(HTTPMessageDelegate):
        def headers_received(self, start_line, headers):
            # Remember the preamble so finish() can hand everything back.
            self.headers = headers
            self.start_line = start_line

        def data_received(self, chunk):
            chunks.append(chunk)

        def finish(self):
            callback((self.start_line, self.headers, b''.join(chunks)))

    # True = client mode: we are reading a *response*, hence read_response.
    conn = HTTP1Connection(stream, True)
    conn.read_response(Delegate())
class HandlerBaseTestCase(AsyncHTTPTestCase):
    """Base class: serves the subclass's ``Handler`` at '/' and adds a
    JSON-decoding fetch helper."""

    def get_app(self):
        # Look up Handler on the concrete subclass, not on this base class.
        return Application([('/', type(self).Handler)])

    def fetch_json(self, *args, **kwargs):
        """Fetch a URL, raise on HTTP errors and decode the JSON body."""
        resp = self.fetch(*args, **kwargs)
        resp.rethrow()
        return json_decode(resp.body)
class HelloWorldRequestHandler(RequestHandler):
    """Trivial handler used to exercise basic GET/POST plumbing."""

    def initialize(self, protocol="http"):
        # The protocol we expect the request to arrive over (http/https).
        self.expected_protocol = protocol

    def get(self):
        if self.request.protocol == self.expected_protocol:
            self.finish("Hello world")
        else:
            raise Exception("unexpected protocol")

    def post(self):
        body_size = len(self.request.body)
        self.finish("Got %d bytes in POST" % body_size)
# In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
# ClientHello messages, which are rejected by SSLv3 and TLSv1
# servers. Note that while the OPENSSL_VERSION_INFO was formally
# introduced in python3.2, it was present but undocumented in
# python 2.7
# Decorator: skip a test when the ssl module / openssl build is too old.
skipIfOldSSL = unittest.skipIf(
    getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0),
    "old version of ssl module and/or openssl")
class BaseSSLTest(AsyncHTTPSTestCase):
    # Serves HelloWorldRequestHandler over HTTPS for the SSL mixin tests.
    def get_app(self):
        return Application([('/', HelloWorldRequestHandler,
                             dict(protocol="https"))])
class SSLTestMixin(object):
    # Shared SSL test cases; subclasses choose the protocol by
    # overriding get_ssl_version().
    def get_ssl_options(self):
        return dict(ssl_version=self.get_ssl_version(),  # type: ignore
                    **AsyncHTTPSTestCase.get_ssl_options())

    def get_ssl_version(self):
        raise NotImplementedError()

    def test_ssl(self):
        response = self.fetch('/')
        self.assertEqual(response.body, b"Hello world")

    def test_large_post(self):
        response = self.fetch('/',
                              method='POST',
                              body='A' * 5000)
        self.assertEqual(response.body, b"Got 5000 bytes in POST")

    def test_non_ssl_request(self):
        # Make sure the server closes the connection when it gets a non-ssl
        # connection, rather than waiting for a timeout or otherwise
        # misbehaving.
        with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
            with ExpectLog(gen_log, 'Uncaught exception', required=False):
                self.http_client.fetch(
                    self.get_url("/").replace('https:', 'http:'),
                    self.stop,
                    request_timeout=3600,
                    connect_timeout=3600)
                response = self.wait()
        self.assertEqual(response.code, 599)

    def test_error_logging(self):
        # No stack traces are logged for SSL errors.
        with ExpectLog(gen_log, 'SSL Error') as expect_log:
            self.http_client.fetch(
                self.get_url("/").replace("https:", "http:"),
                self.stop)
            response = self.wait()
        self.assertEqual(response.code, 599)
        self.assertFalse(expect_log.logged_stack)
# Python's SSL implementation differs significantly between versions.
# For example, SSLv3 and TLSv1 throw an exception if you try to read
# from the socket before the handshake is complete, but the default
# of SSLv23 allows it.
class SSLv23Test(BaseSSLTest, SSLTestMixin):
    # SSLv23 negotiates the best protocol supported by both ends.
    def get_ssl_version(self):
        return ssl.PROTOCOL_SSLv23
@skipIfOldSSL
class SSLv3Test(BaseSSLTest, SSLTestMixin):
    # Pin the handshake to SSLv3 only.
    def get_ssl_version(self):
        return ssl.PROTOCOL_SSLv3
@skipIfOldSSL
class TLSv1Test(BaseSSLTest, SSLTestMixin):
    # Pin the handshake to TLSv1 only.
    def get_ssl_version(self):
        return ssl.PROTOCOL_TLSv1
class SSLContextTest(BaseSSLTest, SSLTestMixin):
    # Exercises passing a pre-built ssl.SSLContext instead of an options dict.
    def get_ssl_options(self):
        context = ssl_options_to_context(
            AsyncHTTPSTestCase.get_ssl_options(self))
        assert isinstance(context, ssl.SSLContext)
        return context
class BadSSLOptionsTest(unittest.TestCase):
    def test_missing_arguments(self):
        # ssl_options with a keyfile but no certfile is rejected up front.
        application = Application()
        self.assertRaises(KeyError, HTTPServer, application, ssl_options={
            "keyfile": "/__missing__.crt",
        })

    def test_missing_key(self):
        """A missing SSL key should cause an immediate exception."""

        application = Application()
        module_dir = os.path.dirname(__file__)
        existing_certificate = os.path.join(module_dir, 'test.crt')
        existing_key = os.path.join(module_dir, 'test.key')

        self.assertRaises((ValueError, IOError),
                          HTTPServer, application, ssl_options={
                              "certfile": "/__mising__.crt",
                          })
        self.assertRaises((ValueError, IOError),
                          HTTPServer, application, ssl_options={
                              "certfile": existing_certificate,
                              "keyfile": "/__missing__.key"
                          })

        # This actually works because both files exist
        HTTPServer(application, ssl_options={
            "certfile": existing_certificate,
            "keyfile": existing_key,
        })
class MultipartTestHandler(RequestHandler):
    # Echoes back the decoded multipart pieces so tests can verify
    # header, argument and file decoding.
    def post(self):
        self.finish({"header": self.request.headers["X-Header-Encoding-Test"],
                     "argument": self.get_argument("argument"),
                     "filename": self.request.files["files"][0].filename,
                     "filebody": _unicode(self.request.files["files"][0]["body"]),
                     })
# This test is also called from wsgi_test
class HTTPConnectionTest(AsyncHTTPTestCase):
    def get_handlers(self):
        return [("/multipart", MultipartTestHandler),
                ("/hello", HelloWorldRequestHandler)]

    def get_app(self):
        return Application(self.get_handlers())

    def raw_fetch(self, headers, body, newline=b"\r\n"):
        # Hand-rolled HTTP request writer; lets tests control the exact
        # bytes (line endings, header encoding) sent over the wire.
        with closing(IOStream(socket.socket())) as stream:
            stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
            self.wait()
            stream.write(
                newline.join(headers +
                             [utf8("Content-Length: %d" % len(body))]) +
                newline + newline + body)
            read_stream_body(stream, self.stop)
            start_line, headers, body = self.wait()
            return body

    def test_multipart_form(self):
        # Encodings here are tricky: Headers are latin1, bodies can be
        # anything (we use utf8 by default).
        response = self.raw_fetch([
            b"POST /multipart HTTP/1.0",
            b"Content-Type: multipart/form-data; boundary=1234567890",
            b"X-Header-encoding-test: \xe9",
        ],
            b"\r\n".join([
                b"Content-Disposition: form-data; name=argument",
                b"",
                u"\u00e1".encode("utf-8"),
                b"--1234567890",
                u'Content-Disposition: form-data; name="files"; filename="\u00f3"'.encode("utf8"),
                b"",
                u"\u00fa".encode("utf-8"),
                b"--1234567890--",
                b"",
            ]))
        data = json_decode(response)
        self.assertEqual(u"\u00e9", data["header"])
        self.assertEqual(u"\u00e1", data["argument"])
        self.assertEqual(u"\u00f3", data["filename"])
        self.assertEqual(u"\u00fa", data["filebody"])

    def test_newlines(self):
        # We support both CRLF and bare LF as line separators.
        for newline in (b"\r\n", b"\n"):
            response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"",
                                      newline=newline)
            self.assertEqual(response, b'Hello world')

    def test_100_continue(self):
        # Run through a 100-continue interaction by hand:
        # When given Expect: 100-continue, we get a 100 response after the
        # headers, and then the real response after the body.
        stream = IOStream(socket.socket())
        stream.connect(("127.0.0.1", self.get_http_port()), callback=self.stop)
        self.wait()
        stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
                                   b"Content-Length: 1024",
                                   b"Expect: 100-continue",
                                   b"Connection: close",
                                   b"\r\n"]), callback=self.stop)
        self.wait()
        stream.read_until(b"\r\n\r\n", self.stop)
        data = self.wait()
        self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
        stream.write(b"a" * 1024)
        stream.read_until(b"\r\n", self.stop)
        first_line = self.wait()
        self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
        stream.read_until(b"\r\n\r\n", self.stop)
        header_data = self.wait()
        headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
        stream.read_bytes(int(headers["Content-Length"]), self.stop)
        body = self.wait()
        self.assertEqual(body, b"Got 1024 bytes in POST")
        stream.close()
class EchoHandler(RequestHandler):
    """Writes the request arguments back to the client as JSON."""

    def _echo_arguments(self):
        # Argument values arrive as bytes; convert to unicode for output.
        self.write(recursive_unicode(self.request.arguments))

    def get(self):
        self._echo_arguments()

    def post(self):
        self._echo_arguments()
class TypeCheckHandler(RequestHandler):
    # Records a type mismatch for each request attribute that does not
    # have the expected native-string / bytes type; the response body is
    # the (ideally empty) error dict.
    def prepare(self):
        self.errors = {}
        fields = [
            ('method', str),
            ('uri', str),
            ('version', str),
            ('remote_ip', str),
            ('protocol', str),
            ('host', str),
            ('path', str),
            ('query', str),
        ]
        for field, expected_type in fields:
            self.check_type(field, getattr(self.request, field), expected_type)

        self.check_type('header_key', list(self.request.headers.keys())[0], str)
        self.check_type('header_value', list(self.request.headers.values())[0], str)

        self.check_type('cookie_key', list(self.request.cookies.keys())[0], str)
        self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str)
        # secure cookies

        self.check_type('arg_key', list(self.request.arguments.keys())[0], str)
        self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes)

    def post(self):
        self.check_type('body', self.request.body, bytes)
        self.write(self.errors)

    def get(self):
        self.write(self.errors)

    def check_type(self, name, obj, expected_type):
        # Exact type match on purpose (not isinstance): subclasses such as
        # bytes-vs-str mixups are exactly what this test is looking for.
        actual_type = type(obj)
        if expected_type != actual_type:
            self.errors[name] = "expected %s, got %s" % (expected_type,
                                                         actual_type)
class HTTPServerTest(AsyncHTTPTestCase):
    """End-to-end request-parsing tests driven through real HTTP fetches."""
    def get_app(self):
        return Application([("/echo", EchoHandler),
                            ("/typecheck", TypeCheckHandler),
                            ("//doubleslash", EchoHandler),
                            ])

    def test_query_string_encoding(self):
        # %C3%A9 is the utf-8 percent-encoding of e-acute; it must be
        # decoded to the unicode character, not passed through raw.
        response = self.fetch("/echo?foo=%C3%A9")
        data = json_decode(response.body)
        self.assertEqual(data, {u"foo": [u"\u00e9"]})

    def test_empty_query_string(self):
        # Repeated empty parameters are all preserved.
        response = self.fetch("/echo?foo=&foo=")
        data = json_decode(response.body)
        self.assertEqual(data, {u"foo": [u"", u""]})

    def test_empty_post_parameters(self):
        response = self.fetch("/echo", method="POST", body="foo=&bar=")
        data = json_decode(response.body)
        self.assertEqual(data, {u"foo": [u""], u"bar": [u""]})

    def test_types(self):
        # TypeCheckHandler returns a dict of type mismatches; empty == OK.
        headers = {"Cookie": "foo=bar"}
        response = self.fetch("/typecheck?foo=bar", headers=headers)
        data = json_decode(response.body)
        self.assertEqual(data, {})

        response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers)
        data = json_decode(response.body)
        self.assertEqual(data, {})

    def test_double_slash(self):
        # urlparse.urlsplit (which tornado.httpserver used to use
        # incorrectly) would parse paths beginning with "//" as
        # protocol-relative urls.
        response = self.fetch("//doubleslash")
        self.assertEqual(200, response.code)
        self.assertEqual(json_decode(response.body), {})

    def test_malformed_body(self):
        # parse_qs is pretty forgiving, but it will fail on python 3
        # if the data is not utf8. On python 2 parse_qs will work,
        # but then the recursive_unicode call in EchoHandler will
        # fail.
        if str is bytes:
            # Python 2: nothing to check here.
            return
        with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'):
            response = self.fetch(
                '/echo', method="POST",
                headers={'Content-Type': 'application/x-www-form-urlencoded'},
                body=b'\xe9')
        # The bad body is logged and ignored; the request still succeeds.
        self.assertEqual(200, response.code)
        self.assertEqual(b'{}', response.body)
class HTTPServerRawTest(AsyncHTTPTestCase):
    """Tests that drive the server over a raw socket (not an HTTP client)
    to exercise malformed and unusual requests."""
    def get_app(self):
        return Application([
            ('/echo', EchoHandler),
        ])

    def setUp(self):
        super(HTTPServerRawTest, self).setUp()
        self.stream = IOStream(socket.socket())
        self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
        self.wait()

    def tearDown(self):
        self.stream.close()
        super(HTTPServerRawTest, self).tearDown()

    def test_empty_request(self):
        # Connecting and closing without sending anything must be handled
        # quietly by the server.
        self.stream.close()
        self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
        self.wait()

    def test_malformed_first_line_response(self):
        with ExpectLog(gen_log, '.*Malformed HTTP request line'):
            self.stream.write(b'asdf\r\n\r\n')
            read_stream_body(self.stream, self.stop)
            start_line, headers, response = self.wait()
            # The server answers 400 instead of just dropping the connection.
            self.assertEqual('HTTP/1.1', start_line.version)
            self.assertEqual(400, start_line.code)
            self.assertEqual('Bad Request', start_line.reason)

    def test_malformed_first_line_log(self):
        with ExpectLog(gen_log, '.*Malformed HTTP request line'):
            self.stream.write(b'asdf\r\n\r\n')
            # TODO: need an async version of ExpectLog so we don't need
            # hard-coded timeouts here.
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.05),
                                     self.stop)
            self.wait()

    def test_malformed_headers(self):
        with ExpectLog(gen_log, '.*Malformed HTTP headers'):
            self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n')
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.05),
                                     self.stop)
            self.wait()

    def test_chunked_request_body(self):
        # Chunked requests are not widely supported and we don't have a way
        # to generate them in AsyncHTTPClient, but HTTPServer will read them.
        self.stream.write(b"""\
POST /echo HTTP/1.1
Transfer-Encoding: chunked
Content-Type: application/x-www-form-urlencoded

4
foo=
3
bar
0

""".replace(b"\n", b"\r\n"))
        read_stream_body(self.stream, self.stop)
        start_line, headers, response = self.wait()
        self.assertEqual(json_decode(response), {u'foo': [u'bar']})

    def test_chunked_request_uppercase(self):
        # As per RFC 2616 section 3.6, "Transfer-Encoding" header's value is
        # case-insensitive.
        self.stream.write(b"""\
POST /echo HTTP/1.1
Transfer-Encoding: Chunked
Content-Type: application/x-www-form-urlencoded

4
foo=
3
bar
0

""".replace(b"\n", b"\r\n"))
        read_stream_body(self.stream, self.stop)
        start_line, headers, response = self.wait()
        self.assertEqual(json_decode(response), {u'foo': [u'bar']})

    def test_invalid_content_length(self):
        with ExpectLog(gen_log, '.*Only integer Content-Length is allowed'):
            self.stream.write(b"""\
POST /echo HTTP/1.1
Content-Length: foo

bar
""".replace(b"\n", b"\r\n"))
            self.stream.read_until_close(self.stop)
            self.wait()
class XHeaderTest(HandlerBaseTestCase):
    """Tests of xheaders=True: X-Real-IP / X-Forwarded-For and
    X-Scheme / X-Forwarded-Proto handling, plus trusted_downstream."""
    class Handler(RequestHandler):
        def get(self):
            # Report the server's computed values so tests can assert on them.
            self.write(dict(remote_ip=self.request.remote_ip,
                            remote_protocol=self.request.protocol))

    def get_httpserver_options(self):
        return dict(xheaders=True, trusted_downstream=['5.5.5.5'])

    def test_ip_headers(self):
        # Without headers, the socket's own address is reported.
        self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1")

        valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
        self.assertEqual(
            self.fetch_json("/", headers=valid_ipv4)["remote_ip"],
            "4.4.4.4")

        # X-Forwarded-For: the last entry in the list wins.
        valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"}
        self.assertEqual(
            self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
            "4.4.4.4")

        valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
        self.assertEqual(
            self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
            "2620:0:1cfe:face:b00c::3")

        valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"}
        self.assertEqual(
            self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"],
            "2620:0:1cfe:face:b00c::3")

        # Header values that are not valid IPs are ignored entirely.
        invalid_chars = {"X-Real-IP": "4.4.4.4<script>"}
        self.assertEqual(
            self.fetch_json("/", headers=invalid_chars)["remote_ip"],
            "127.0.0.1")

        invalid_chars_list = {"X-Forwarded-For": "4.4.4.4, 5.5.5.5<script>"}
        self.assertEqual(
            self.fetch_json("/", headers=invalid_chars_list)["remote_ip"],
            "127.0.0.1")

        invalid_host = {"X-Real-IP": "www.google.com"}
        self.assertEqual(
            self.fetch_json("/", headers=invalid_host)["remote_ip"],
            "127.0.0.1")

    def test_trusted_downstream(self):
        # 5.5.5.5 is in trusted_downstream, so it is skipped and the
        # previous hop in the list is reported instead.
        valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4, 5.5.5.5"}
        self.assertEqual(
            self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
            "4.4.4.4")

    def test_scheme_headers(self):
        self.assertEqual(self.fetch_json("/")["remote_protocol"], "http")

        https_scheme = {"X-Scheme": "https"}
        self.assertEqual(
            self.fetch_json("/", headers=https_scheme)["remote_protocol"],
            "https")

        https_forwarded = {"X-Forwarded-Proto": "https"}
        self.assertEqual(
            self.fetch_json("/", headers=https_forwarded)["remote_protocol"],
            "https")

        # Unrecognized scheme values are ignored.
        bad_forwarded = {"X-Forwarded-Proto": "unknown"}
        self.assertEqual(
            self.fetch_json("/", headers=bad_forwarded)["remote_protocol"],
            "http")
class SSLXHeaderTest(AsyncHTTPSTestCase, HandlerBaseTestCase):
    """X-Scheme handling when the underlying connection is already TLS."""

    def get_app(self):
        return Application([('/', XHeaderTest.Handler)])

    def get_httpserver_options(self):
        options = super(SSLXHeaderTest, self).get_httpserver_options()
        options['xheaders'] = True
        return options

    def test_request_without_xprotocol(self):
        # With no X-Scheme header, the protocol reflects the TLS socket.
        self.assertEqual(self.fetch_json("/")["remote_protocol"], "https")

        # A proxy may explicitly downgrade the reported scheme...
        self.assertEqual(
            self.fetch_json(
                "/", headers={"X-Scheme": "http"})["remote_protocol"],
            "http")

        # ...but unrecognized values are ignored.
        self.assertEqual(
            self.fetch_json(
                "/", headers={"X-Scheme": "unknown"})["remote_protocol"],
            "https")
class ManualProtocolTest(HandlerBaseTestCase):
    """The ``protocol`` server option overrides protocol autodetection."""

    class Handler(RequestHandler):
        def get(self):
            self.write(dict(protocol=self.request.protocol))

    def get_httpserver_options(self):
        return dict(protocol='https')

    def test_manual_protocol(self):
        reported = self.fetch_json('/')
        self.assertEqual(reported['protocol'], 'https')
@unittest.skipIf(not hasattr(socket, 'AF_UNIX') or sys.platform == 'cygwin',
                 "unix sockets not supported on this platform")
class UnixSocketTest(AsyncTestCase):
    """HTTPServers can listen on Unix sockets too.

    Why would you want to do this?  Nginx can proxy to backends listening
    on unix sockets, for one thing (and managing a namespace for unix
    sockets can be easier than managing a bunch of TCP port numbers).

    Unfortunately, there's no way to specify a unix socket in a url for
    an HTTP client, so we have to test this by hand.
    """
    def setUp(self):
        super(UnixSocketTest, self).setUp()
        # Socket file lives in a fresh temp dir, removed in tearDown.
        self.tmpdir = tempfile.mkdtemp()
        self.sockfile = os.path.join(self.tmpdir, "test.sock")
        sock = netutil.bind_unix_socket(self.sockfile)
        app = Application([("/hello", HelloWorldRequestHandler)])
        self.server = HTTPServer(app)
        self.server.add_socket(sock)
        self.stream = IOStream(socket.socket(socket.AF_UNIX))
        self.stream.connect(self.sockfile, self.stop)
        self.wait()

    def tearDown(self):
        self.stream.close()
        self.server.stop()
        shutil.rmtree(self.tmpdir)
        super(UnixSocketTest, self).tearDown()

    def test_unix_socket(self):
        # Speak HTTP by hand over the unix socket.
        self.stream.write(b"GET /hello HTTP/1.0\r\n\r\n")
        self.stream.read_until(b"\r\n", self.stop)
        response = self.wait()
        self.assertEqual(response, b"HTTP/1.1 200 OK\r\n")
        self.stream.read_until(b"\r\n\r\n", self.stop)
        headers = HTTPHeaders.parse(self.wait().decode('latin1'))
        self.stream.read_bytes(int(headers["Content-Length"]), self.stop)
        body = self.wait()
        self.assertEqual(body, b"Hello world")

    def test_unix_socket_bad_request(self):
        # Unix sockets don't have remote addresses so they just return an
        # empty string.
        with ExpectLog(gen_log, "Malformed HTTP message from"):
            self.stream.write(b"garbage\r\n\r\n")
            self.stream.read_until_close(self.stop)
            response = self.wait()
        self.assertEqual(response, b"HTTP/1.1 400 Bad Request\r\n\r\n")
class KeepAliveTest(AsyncHTTPTestCase):
    """Tests various scenarios for HTTP 1.1 keep-alive support.

    These tests don't use AsyncHTTPClient because we want to control
    connection reuse and closing.
    """
    def get_app(self):
        class HelloHandler(RequestHandler):
            def get(self):
                self.finish('Hello world')

            def post(self):
                self.finish('Hello world')

        class LargeHandler(RequestHandler):
            def get(self):
                # 512KB should be bigger than the socket buffers so it will
                # be written out in chunks.
                self.write(''.join(chr(i % 256) * 1024 for i in range(512)))

        class FinishOnCloseHandler(RequestHandler):
            @asynchronous
            def get(self):
                self.flush()

            def on_connection_close(self):
                # This is not very realistic, but finishing the request
                # from the close callback has the right timing to mimic
                # some errors seen in the wild.
                self.finish('closed')

        return Application([('/', HelloHandler),
                            ('/large', LargeHandler),
                            ('/finish_on_close', FinishOnCloseHandler)])

    def setUp(self):
        super(KeepAliveTest, self).setUp()
        # NOTE(review): requests below hard-code their version strings;
        # self.http_version looks unused within this class — confirm
        # against the rest of the file before removing.
        self.http_version = b'HTTP/1.1'

    def tearDown(self):
        # We just closed the client side of the socket; let the IOLoop run
        # once to make sure the server side got the message.
        self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
        self.wait()

        if hasattr(self, 'stream'):
            self.stream.close()
        super(KeepAliveTest, self).tearDown()

    # The next few methods are a crude manual http client
    def connect(self):
        self.stream = IOStream(socket.socket())
        self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
        self.wait()

    def read_headers(self):
        # Reads the status line plus headers; asserts a 200 response.
        self.stream.read_until(b'\r\n', self.stop)
        first_line = self.wait()
        self.assertTrue(first_line.startswith(b'HTTP/1.1 200'), first_line)
        self.stream.read_until(b'\r\n\r\n', self.stop)
        header_bytes = self.wait()
        headers = HTTPHeaders.parse(header_bytes.decode('latin1'))
        return headers

    def read_response(self):
        # Reads a complete "Hello world" response; headers are kept on
        # self.headers so tests can inspect Connection etc.
        self.headers = self.read_headers()
        self.stream.read_bytes(int(self.headers['Content-Length']), self.stop)
        body = self.wait()
        self.assertEqual(b'Hello world', body)

    def close(self):
        self.stream.close()
        del self.stream

    def test_two_requests(self):
        # The default for HTTP/1.1 is to keep the connection open.
        self.connect()
        self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
        self.read_response()
        self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
        self.read_response()
        self.close()

    def test_request_close(self):
        # Connection: close makes the server close after responding.
        self.connect()
        self.stream.write(b'GET / HTTP/1.1\r\nConnection: close\r\n\r\n')
        self.read_response()
        self.stream.read_until_close(callback=self.stop)
        data = self.wait()
        self.assertTrue(not data)
        self.assertEqual(self.headers['Connection'], 'close')
        self.close()

    # keepalive is supported for http 1.0 too, but it's opt-in
    def test_http10(self):
        self.http_version = b'HTTP/1.0'
        self.connect()
        self.stream.write(b'GET / HTTP/1.0\r\n\r\n')
        self.read_response()
        self.stream.read_until_close(callback=self.stop)
        data = self.wait()
        self.assertTrue(not data)
        # No Connection header: HTTP/1.0 defaults to closing.
        self.assertTrue('Connection' not in self.headers)
        self.close()

    def test_http10_keepalive(self):
        self.http_version = b'HTTP/1.0'
        self.connect()
        self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
        self.read_response()
        self.assertEqual(self.headers['Connection'], 'Keep-Alive')
        self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
        self.read_response()
        self.assertEqual(self.headers['Connection'], 'Keep-Alive')
        self.close()

    def test_http10_keepalive_extra_crlf(self):
        # A stray blank line after the request must not confuse the parser.
        self.http_version = b'HTTP/1.0'
        self.connect()
        self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n\r\n')
        self.read_response()
        self.assertEqual(self.headers['Connection'], 'Keep-Alive')
        self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
        self.read_response()
        self.assertEqual(self.headers['Connection'], 'Keep-Alive')
        self.close()

    def test_pipelined_requests(self):
        # Two requests written back-to-back before reading any response.
        self.connect()
        self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
        self.read_response()
        self.read_response()
        self.close()

    def test_pipelined_cancel(self):
        # Closing mid-pipeline must not wedge the server.
        self.connect()
        self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
        # only read once
        self.read_response()
        self.close()

    def test_cancel_during_download(self):
        # Close while the server is still streaming the large response.
        self.connect()
        self.stream.write(b'GET /large HTTP/1.1\r\n\r\n')
        self.read_headers()
        self.stream.read_bytes(1024, self.stop)
        self.wait()
        self.close()

    def test_finish_while_closed(self):
        # The handler flushes headers then finishes from its close callback.
        self.connect()
        self.stream.write(b'GET /finish_on_close HTTP/1.1\r\n\r\n')
        self.read_headers()
        self.close()

    def test_keepalive_chunked(self):
        # A zero-length chunked request body must not break keep-alive.
        self.http_version = b'HTTP/1.0'
        self.connect()
        self.stream.write(b'POST / HTTP/1.0\r\nConnection: keep-alive\r\n'
                          b'Transfer-Encoding: chunked\r\n'
                          b'\r\n0\r\n')
        self.read_response()
        self.assertEqual(self.headers['Connection'], 'Keep-Alive')
        self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
        self.read_response()
        self.assertEqual(self.headers['Connection'], 'Keep-Alive')
        self.close()
class GzipBaseTest(object):
    """Mixin with helpers for posting gzip-compressed form bodies.

    Subclasses combine this with AsyncHTTPTestCase and choose, via the
    server options, whether the server decompresses request bodies.
    """
    def get_app(self):
        return Application([('/', EchoHandler)])

    def post_gzip(self, body):
        """POST *body* (a form-encoded string) gzip-compressed to /."""
        bytesio = BytesIO()
        gzip_file = gzip.GzipFile(mode='w', fileobj=bytesio)
        gzip_file.write(utf8(body))
        gzip_file.close()
        compressed_body = bytesio.getvalue()
        return self.fetch('/', method='POST', body=compressed_body,
                          headers={'Content-Encoding': 'gzip'})

    def test_uncompressed(self):
        # Plain bodies work regardless of the decompression option.
        response = self.fetch('/', method='POST', body='foo=bar')
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(json_decode(response.body), {u'foo': [u'bar']})
class GzipTest(GzipBaseTest, AsyncHTTPTestCase):
    """With decompress_request=True the server inflates gzip bodies."""
    def get_httpserver_options(self):
        return dict(decompress_request=True)

    def test_gzip(self):
        response = self.post_gzip('foo=bar')
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(json_decode(response.body), {u'foo': [u'bar']})
class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase):
    """Without decompress_request, a gzip body cannot be parsed."""
    def test_gzip_unsupported(self):
        # Gzip support is opt-in; without it the server fails to parse
        # the body (but parsing form bodies is currently just a log message,
        # not a fatal error).
        with ExpectLog(gen_log, "Unsupported Content-Encoding"):
            response = self.post_gzip('foo=bar')
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(json_decode(response.body), {})
class StreamingChunkSizeTest(AsyncHTTPTestCase):
    """Verifies that request bodies are delivered to data_received in
    pieces no larger than the server's chunk_size, with and without
    gzip and HTTP chunked encoding."""
    # 50 characters long, and repetitive so it can be compressed.
    BODY = b'01234567890123456789012345678901234567890123456789'
    CHUNK_SIZE = 16

    def get_http_client(self):
        # body_producer doesn't work on curl_httpclient, so override the
        # configured AsyncHTTPClient implementation.
        return SimpleAsyncHTTPClient()

    def get_httpserver_options(self):
        return dict(chunk_size=self.CHUNK_SIZE, decompress_request=True)

    class MessageDelegate(HTTPMessageDelegate):
        """Records the size of each body chunk and replies with the list
        encoded as JSON."""
        def __init__(self, connection):
            self.connection = connection

        def headers_received(self, start_line, headers):
            self.chunk_lengths = []

        def data_received(self, chunk):
            self.chunk_lengths.append(len(chunk))

        def finish(self):
            response_body = utf8(json_encode(self.chunk_lengths))
            self.connection.write_headers(
                ResponseStartLine('HTTP/1.1', 200, 'OK'),
                HTTPHeaders({'Content-Length': str(len(response_body))}))
            self.connection.write(response_body)
            self.connection.finish()

    def get_app(self):
        class App(HTTPServerConnectionDelegate):
            def start_request(self, server_conn, request_conn):
                return StreamingChunkSizeTest.MessageDelegate(request_conn)
        return App()

    def fetch_chunk_sizes(self, **kwargs):
        # POSTs with the given kwargs and sanity-checks the chunk sizes
        # reported by the delegate before returning them.
        response = self.fetch('/', method='POST', **kwargs)
        response.rethrow()
        chunks = json_decode(response.body)
        self.assertEqual(len(self.BODY), sum(chunks))
        for chunk_size in chunks:
            self.assertLessEqual(chunk_size, self.CHUNK_SIZE,
                                 'oversized chunk: ' + str(chunks))
            self.assertGreater(chunk_size, 0,
                               'empty chunk: ' + str(chunks))
        return chunks

    def compress(self, body):
        bytesio = BytesIO()
        gzfile = gzip.GzipFile(mode='w', fileobj=bytesio)
        gzfile.write(body)
        gzfile.close()
        compressed = bytesio.getvalue()
        if len(compressed) >= len(body):
            raise Exception("body did not shrink when compressed")
        return compressed

    def test_regular_body(self):
        chunks = self.fetch_chunk_sizes(body=self.BODY)
        # Without compression we know exactly what to expect.
        self.assertEqual([16, 16, 16, 2], chunks)

    def test_compressed_body(self):
        self.fetch_chunk_sizes(body=self.compress(self.BODY),
                               headers={'Content-Encoding': 'gzip'})
        # Compression creates irregular boundaries so the assertions
        # in fetch_chunk_sizes are as specific as we can get.

    def test_chunked_body(self):
        def body_producer(write):
            write(self.BODY[:20])
            write(self.BODY[20:])
        chunks = self.fetch_chunk_sizes(body_producer=body_producer)
        # HTTP chunk boundaries translate to application-visible breaks
        self.assertEqual([16, 4, 16, 14], chunks)

    def test_chunked_compressed(self):
        compressed = self.compress(self.BODY)
        self.assertGreater(len(compressed), 20)

        def body_producer(write):
            write(compressed[:20])
            write(compressed[20:])
        self.fetch_chunk_sizes(body_producer=body_producer,
                               headers={'Content-Encoding': 'gzip'})
class MaxHeaderSizeTest(AsyncHTTPTestCase):
    """The max_header_size option rejects oversized request headers."""
    def get_app(self):
        return Application([('/', HelloWorldRequestHandler)])

    def get_httpserver_options(self):
        return dict(max_header_size=1024)

    def test_small_headers(self):
        response = self.fetch("/", headers={'X-Filler': 'a' * 100})
        response.rethrow()
        self.assertEqual(response.body, b"Hello world")

    def test_large_headers(self):
        with ExpectLog(gen_log, "Unsatisfiable read", required=False):
            response = self.fetch("/", headers={'X-Filler': 'a' * 1000})
        # 431 is "Request Header Fields Too Large", defined in RFC
        # 6585. However, many implementations just close the
        # connection in this case, resulting in a 599.
        self.assertIn(response.code, (431, 599))
@skipOnTravis
class IdleTimeoutTest(AsyncHTTPTestCase):
    """Connections idle longer than idle_connection_timeout are closed
    by the server."""
    def get_app(self):
        return Application([('/', HelloWorldRequestHandler)])

    def get_httpserver_options(self):
        return dict(idle_connection_timeout=0.1)

    def setUp(self):
        super(IdleTimeoutTest, self).setUp()
        # All opened streams, closed in tearDown so a failing test
        # cannot leak file descriptors.
        self.streams = []

    def tearDown(self):
        super(IdleTimeoutTest, self).tearDown()
        for stream in self.streams:
            stream.close()

    def connect(self):
        stream = IOStream(socket.socket())
        stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
        self.wait()
        self.streams.append(stream)
        return stream

    def test_unused_connection(self):
        # A connection that never sends a request still times out.
        stream = self.connect()
        stream.set_close_callback(self.stop)
        self.wait()

    def test_idle_after_use(self):
        stream = self.connect()
        stream.set_close_callback(lambda: self.stop("closed"))

        # Use the connection twice to make sure keep-alives are working
        for i in range(2):
            stream.write(b"GET / HTTP/1.1\r\n\r\n")
            stream.read_until(b"\r\n\r\n", self.stop)
            self.wait()
            stream.read_bytes(11, self.stop)
            data = self.wait()
            self.assertEqual(data, b"Hello world")

        # Now let the timeout trigger and close the connection.
        data = self.wait()
        self.assertEqual(data, "closed")
class BodyLimitsTest(AsyncHTTPTestCase):
    """Tests of max_body_size / body_timeout and their per-request
    overrides via connection.set_max_body_size / set_body_timeout."""
    def get_app(self):
        class BufferedHandler(RequestHandler):
            def put(self):
                self.write(str(len(self.request.body)))

        @stream_request_body
        class StreamingHandler(RequestHandler):
            def initialize(self):
                self.bytes_read = 0

            def prepare(self):
                # Optional per-request overrides, driven by query args.
                if 'expected_size' in self.request.arguments:
                    self.request.connection.set_max_body_size(
                        int(self.get_argument('expected_size')))
                if 'body_timeout' in self.request.arguments:
                    self.request.connection.set_body_timeout(
                        float(self.get_argument('body_timeout')))

            def data_received(self, data):
                self.bytes_read += len(data)

            def put(self):
                self.write(str(self.bytes_read))

        return Application([('/buffered', BufferedHandler),
                            ('/streaming', StreamingHandler)])

    def get_httpserver_options(self):
        return dict(body_timeout=3600, max_body_size=4096)

    def get_http_client(self):
        # body_producer doesn't work on curl_httpclient, so override the
        # configured AsyncHTTPClient implementation.
        return SimpleAsyncHTTPClient()

    def test_small_body(self):
        response = self.fetch('/buffered', method='PUT', body=b'a' * 4096)
        self.assertEqual(response.body, b'4096')
        response = self.fetch('/streaming', method='PUT', body=b'a' * 4096)
        self.assertEqual(response.body, b'4096')

    def test_large_body_buffered(self):
        with ExpectLog(gen_log, '.*Content-Length too long'):
            response = self.fetch('/buffered', method='PUT', body=b'a' * 10240)
        self.assertEqual(response.code, 400)

    def test_large_body_buffered_chunked(self):
        with ExpectLog(gen_log, '.*chunked body too large'):
            response = self.fetch('/buffered', method='PUT',
                                  body_producer=lambda write: write(b'a' * 10240))
        # this test is flaky on windows; accept 400 (expected) or 599
        self.assertIn(response.code, [400, 599])

    def test_large_body_streaming(self):
        with ExpectLog(gen_log, '.*Content-Length too long'):
            response = self.fetch('/streaming', method='PUT', body=b'a' * 10240)
        self.assertEqual(response.code, 400)

    def test_large_body_streaming_chunked(self):
        with ExpectLog(gen_log, '.*chunked body too large'):
            response = self.fetch('/streaming', method='PUT',
                                  body_producer=lambda write: write(b'a' * 10240))
        # this test is flaky on windows; accept 400 (expected) or 599
        self.assertIn(response.code, [400, 599])

    def test_large_body_streaming_override(self):
        # expected_size raises the limit for this one request.
        response = self.fetch('/streaming?expected_size=10240', method='PUT',
                              body=b'a' * 10240)
        self.assertEqual(response.body, b'10240')

    def test_large_body_streaming_chunked_override(self):
        response = self.fetch('/streaming?expected_size=10240', method='PUT',
                              body_producer=lambda write: write(b'a' * 10240))
        self.assertEqual(response.body, b'10240')

    @gen_test
    def test_timeout(self):
        stream = IOStream(socket.socket())
        try:
            yield stream.connect(('127.0.0.1', self.get_http_port()))
            # Use a raw stream because AsyncHTTPClient won't let us read a
            # response without finishing a body.
            stream.write(b'PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n'
                         b'Content-Length: 42\r\n\r\n')
            with ExpectLog(gen_log, 'Timeout reading body'):
                response = yield stream.read_until_close()
            # The connection is dropped with no response at all.
            self.assertEqual(response, b'')
        finally:
            stream.close()

    @gen_test
    def test_body_size_override_reset(self):
        # The max_body_size override is reset between requests.
        stream = IOStream(socket.socket())
        try:
            yield stream.connect(('127.0.0.1', self.get_http_port()))
            # Use a raw stream so we can make sure it's all on one connection.
            stream.write(b'PUT /streaming?expected_size=10240 HTTP/1.1\r\n'
                         b'Content-Length: 10240\r\n\r\n')
            stream.write(b'a' * 10240)
            start_line, headers, response = yield gen.Task(read_stream_body, stream)
            self.assertEqual(response, b'10240')
            # Without the ?expected_size parameter, we get the old default value
            stream.write(b'PUT /streaming HTTP/1.1\r\n'
                         b'Content-Length: 10240\r\n\r\n')
            with ExpectLog(gen_log, '.*Content-Length too long'):
                data = yield stream.read_until_close()
            self.assertEqual(data, b'HTTP/1.1 400 Bad Request\r\n\r\n')
        finally:
            stream.close()
class LegacyInterfaceTest(AsyncHTTPTestCase):
    """The pre-4.0 request_callback interface still works."""
    def get_app(self):
        # The old request_callback interface does not implement the
        # delegate interface, and writes its response via request.write
        # instead of request.connection.write_headers.
        def handle_request(request):
            self.http1 = request.version.startswith("HTTP/1.")
            if not self.http1:
                # This test will be skipped if we're using HTTP/2,
                # so just close it out cleanly using the modern interface.
                request.connection.write_headers(
                    ResponseStartLine('', 200, 'OK'),
                    HTTPHeaders())
                request.connection.finish()
                return
            # Legacy style: write the raw status line/headers by hand.
            message = b"Hello world"
            request.write(utf8("HTTP/1.1 200 OK\r\n"
                               "Content-Length: %d\r\n\r\n" % len(message)))
            request.write(message)
            request.finish()
        return handle_request

    def test_legacy_interface(self):
        response = self.fetch('/')
        if not self.http1:
            self.skipTest("requires HTTP/1.x")
        self.assertEqual(response.body, b"Hello world")
| SuminAndrew/tornado | tornado/test/httpserver_test.py | Python | apache-2.0 | 42,826 |
'''
A few ways of generating random strings
'''
import binascii
import hashlib
import os
import random
import string
import uuid
def generate_unique_id():
    """Return a random UUID4 rendered as a string.

    Example: bf47b209-b4b2-4edc-a0e8-75b9eb48bc09
    """
    return str(uuid.uuid4())
def generate_secret(length=32):
    """Return a random alphanumeric string of *length* characters.

    This could be used, for example, for passwords, or for app secrets.
    Example: KlW5ISSRgJZNHFT2KcFNb2OVEdPGDMBE

    Uses random.SystemRandom (backed by os.urandom) instead of the
    default Mersenne Twister, since the output is security-sensitive.
    Also replaces the Python-2-only ``xrange`` so the function runs on
    Python 3 as well.
    """
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))
def generate_secret_crypto(length=32):
    """Return *length* cryptographically secure random bytes, hex-encoded.

    This could be used as an alternative to generate_secret(). The result
    should be good enough to be suitable for cryptographic use.  The
    returned string is ``2 * length`` hex characters, e.g.:
    7cbe8bd81672699cca5796f523cf6ed4946586bb28305d2f75842ce521815e73

    binascii.hexlify works on both Python 2 and 3; the original
    ``bytes.encode('hex')`` codec only exists on Python 2.
    """
    return binascii.hexlify(os.urandom(length)).decode('ascii')
def generate_key():
    """Return a SHA-256 hex digest derived from 256 random bits.

    Example:
    b6dc7b0d72cc12d253efe5c6eee0c88a6f8c2f3607f127d117295106de176f29

    Uses random.SystemRandom so the bits are cryptographically secure,
    and encodes the text before hashing: on Python 3, hashlib rejects
    unicode ``str`` input (the original passed a str directly).
    """
    # Python long int with 256 random bits, e.g.
    # 110541642560575068150163131946313287475321532576990646525066748601962842052091
    random_bits = str(random.SystemRandom().getrandbits(256))
    # Then hash it to produce a fixed-width hex string.
    return hashlib.sha256(random_bits.encode('utf-8')).hexdigest()
| zugaldia/appython | appython/utils/generators.py | Python | apache-2.0 | 1,141 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the optional ProjectPage.uptimerobot_name char field."""

    dependencies = [
        ('projects', '0019_projectpage_piwik_id'),
    ]

    operations = [
        migrations.AddField(
            model_name='projectpage',
            name='uptimerobot_name',
            # Optional: blank in forms, NULL in the database.
            field=models.CharField(null=True, max_length=100, blank=True),
        ),
    ]
| City-of-Helsinki/devheldev | projects/migrations/0020_projectpage_uptimerobot_name.py | Python | agpl-3.0 | 442 |
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.expr.types import ArrayExpr, TableExpr, RelationError
from ibis.common import ExpressionError
from ibis.expr.datatypes import array_type
import ibis.expr.analysis as L
import ibis.expr.api as api
import ibis.expr.types as ir
import ibis.expr.operations as ops
import ibis
from ibis.compat import unittest
from ibis.expr.tests.mocks import MockConnection, BasicTestCase
import ibis.common as com
import ibis.config as config
from ibis.tests.util import assert_equal
class TestTableExprBasics(BasicTestCase, unittest.TestCase):
def test_empty_schema(self):
    # A table may be declared with no columns at all.
    zero_cols = api.table([], 'foo')
    assert len(zero_cols.schema()) == 0
def test_columns(self):
    # The columns property lists names in schema order.
    table = self.con.table('alltypes')
    assert table.columns == ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
def test_view_new_relation(self):
    # For assisting with self-joins and other self-referential operations
    # where we need to be able to treat instances of the same TableExpr as
    # semantically distinct
    #
    # This thing is not exactly a projection, since it has no semantic
    # meaning when it comes to execution
    tview = self.table.view()

    roots = tview._root_tables()
    # The view is its own (single) root, distinct from self.table.
    assert len(roots) == 1
    assert roots[0] is tview.op()
def test_get_type(self):
    # Each column reports the dtype declared in the schema.
    for name, dtype in self.schema_dict.items():
        assert self.table._get_type(name) == dtype
def test_getitem_column_select(self):
    for k, v in self.schema_dict.items():
        col = self.table[k]

        # Make sure it's the right type
        assert isinstance(col, ArrayExpr)
        assert isinstance(col, array_type(v))

        # Ensure we have a field selection with back-reference to the table
        parent = col.parent()
        assert isinstance(parent, ops.TableColumn)
        assert parent.parent() is self.table
def test_getitem_attribute(self):
    # Attribute access is sugar for column selection.
    result = self.table.a
    assert_equal(result, self.table['a'])

    assert 'a' in dir(self.table)

    # Project and add a name that conflicts with a TableExpr built-in
    # attribute; the built-in must win over the column.
    view = self.table[[self.table, self.table['a'].name('schema')]]
    assert not isinstance(view.schema, ArrayExpr)
def test_projection(self):
    cols = ['f', 'a', 'h']

    proj = self.table[cols]
    assert isinstance(proj, TableExpr)
    assert isinstance(proj.op(), ops.Projection)

    # Column order follows the projection list, not the original schema.
    assert proj.schema().names == cols

    for c in cols:
        expr = proj[c]
        assert isinstance(expr, type(self.table[c]))
def test_projection_no_list(self):
    # select() accepts a bare expression, not only a list of them.
    doubled = (self.table.f * 2).name('bar')
    assert_equal(self.table.select(doubled),
                 self.table.projection([doubled]))
def test_projection_with_exprs(self):
    # unnamed expr to test
    mean_diff = (self.table['a'] - self.table['c']).mean()

    col_exprs = [self.table['b'].log().name('log_b'),
                 mean_diff.name('mean_diff')]

    proj = self.table[col_exprs + ['g']]
    schema = proj.schema()
    assert schema.names == ['log_b', 'mean_diff', 'g']
    assert schema.types == ['double', 'double', 'string']

    # Test with unnamed expr: projecting without a name must fail.
    self.assertRaises(ExpressionError, self.table.projection,
                      ['g', self.table['a'] - self.table['c']])
def test_projection_duplicate_names(self):
    # Selecting the same column twice violates schema integrity.
    with self.assertRaises(com.IntegrityError):
        self.table.projection([self.table.c, self.table.c])
def test_projection_invalid_root(self):
    # Expressions rooted in a different table cannot be projected.
    shared_schema = {
        'foo': 'double',
        'bar': 'int32'
    }

    left = api.table(shared_schema, name='foo')
    right = api.table(shared_schema, name='bar')

    with self.assertRaises(RelationError):
        left.projection([right['foo'], right['bar']])
def test_projection_unnamed_literal_interactive_blowup(self):
    # #147 and #153 alike
    table = self.con.table('functional_alltypes')

    with config.option_context('interactive', True):
        try:
            table.select([table.bigint_col, ibis.literal(5)])
        except Exception as e:
            assert 'named' in e.args[0]
        else:
            # Previously the test passed silently if no exception was
            # raised at all; fail explicitly in that case.
            raise AssertionError('expected error about unnamed expression')
def test_projection_of_aggregated(self):
    # Fully-formed aggregations "block"; in a projection, column
    # expressions referencing table expressions below the aggregation are
    # invalid.
    # TODO: not implemented yet; placeholder only.
    pass
def test_projection_with_star_expr(self):
    new_expr = (self.table['a'] * 5).name('bigger_a')

    t = self.table

    # it lives!  A whole-table reference may appear alongside new columns.
    proj = t[t, new_expr]
    repr(proj)

    # The new column is appended after all original columns.
    ex_names = self.table.schema().names + ['bigger_a']
    assert proj.schema().names == ex_names

    # cannot pass an invalid table expression
    t2 = t.aggregate([t['a'].sum().name('sum(a)')], by=['g'])
    self.assertRaises(RelationError, t.__getitem__, [t2])
    # TODO: there may be some ways this can be invalid
def test_projection_convenient_syntax(self):
    # t[expr, ...] and t[[expr, ...]] are interchangeable spellings.
    via_varargs = self.table[self.table, self.table['a'].name('foo')]
    via_list = self.table[[self.table, self.table['a'].name('foo')]]
    assert_equal(via_varargs, via_list)
def test_projection_mutate_analysis_bug(self):
    # GH #549
    t = self.con.table('airlines')

    # it works!  (regression check: filter + mutate + project used to
    # fail during expression analysis)
    (t[t.depdelay.notnull()]
     .mutate(leg=ibis.literal('-').join([t.origin, t.dest]))
     ['year', 'month', 'day', 'depdelay', 'leg'])
def test_projection_self(self):
    # Indexing a table with itself is the identity projection.
    assert_equal(self.table[self.table],
                 self.table.projection(self.table))
def test_add_column(self):
    """add_column stacks a select-all projection plus the new expr."""
    # Creates a projection with a select-all on top of a non-projection
    # TableExpr
    t = self.table
    extra = (t['a'] * 5).name('bigger_a')

    # Default: the expression keeps its own name.
    assert_equal(t.add_column(extra), t[[t, extra]])

    # An explicit name overrides the expression's name.
    assert_equal(t.add_column(extra, 'wat'), t[[t, extra.name('wat')]])
def test_add_column_scalar_expr(self):
    """Placeholder: adding scalar/literal columns should be covered."""
    # Check literals, at least
    pass
def test_add_column_aggregate_crossjoin(self):
    """Placeholder: column derived from an aggregated scalar value."""
    # A new column that depends on a scalar value produced by this or some
    # other table.
    #
    # For example:
    # SELECT *, b - VAL
    # FROM table1
    #
    # Here, VAL could be something produced by aggregating table1 or any
    # other table for that matter.
    pass
def test_add_column_existing_projection(self):
    """Adding a column to a projection folds it into that projection."""
    # The "blocking" predecessor table is a projection; we can simply add
    # the column to the existing projection
    doubled = (self.table.f * 2).name('foo')
    quadrupled = (self.table.f * 4).name('bar')
    step1 = self.table.add_column(doubled)
    step2 = step1.add_column(quadrupled)
    assert_equal(step2, self.table[self.table, doubled, quadrupled])
def test_mutate(self):
    """mutate accepts positional exprs, keyword exprs, and bare literals."""
    one = self.table.f * 2
    foo = (self.table.a + self.table.b).name('foo')
    expr = self.table.mutate(foo, one=one, two=2)
    # Keyword args take their keyword as the output name; bare values are
    # wrapped as literals.
    expected = self.table[self.table, foo, one.name('one'),
                          ibis.literal(2).name('two')]
    assert_equal(expr, expected)
def test_mutate_alter_existing_columns(self):
    """Mutating an existing column name replaces it in place."""
    new_f = self.table.f * 2
    foo = self.table.d * 2
    expr = self.table.mutate(f=new_f, foo=foo)
    # 'f' is replaced where it stood; the new 'foo' is appended at the end.
    expected = self.table['a', 'b', 'c', 'd', 'e',
                          new_f.name('f'), 'g', 'h',
                          foo.name('foo')]
    assert_equal(expr, expected)
def test_replace_column(self):
    """set_column swaps one column in place, preserving column order."""
    tb = api.table([('a', 'int32'), ('b', 'double'), ('c', 'string')])
    as_int = tb.b.cast('int32')
    replaced = tb.set_column('b', as_int)
    assert_equal(replaced, tb[tb.a, as_int.name('b'), tb.c])
def test_filter_no_list(self):
    """filter() accepts a bare predicate as well as a list of them."""
    pred = self.table.a > 5
    assert_equal(self.table.filter(pred), self.table[pred])
def test_add_predicate(self):
    """Boolean indexing on a table produces a Filter operation."""
    filtered = self.table[self.table['a'] > 5]
    assert isinstance(filtered.op(), ops.Filter)
def test_filter_root_table_preserved(self):
    """Filtering keeps the original table op as the expression root."""
    result = self.table[self.table['a'] > 5]
    roots = result.op().root_tables()
    # Identity check: the root must be the very same op object.
    assert roots[0] is self.table.op()
def test_invalid_predicate(self):
    """Predicates rooted at a lookalike (same-schema) table are rejected."""
    lookalike = api.table(self.schema, name='bar')
    bad_pred = lookalike['a'] > 5
    with self.assertRaises(RelationError):
        self.table[bad_pred]
def test_add_predicate_coalesce(self):
    """Successive filters collapse into a single Filter op."""
    # Successive predicates get combined into one rather than nesting. This
    # is mainly to enhance readability since we could handle this during
    # expression evaluation anyway.
    pred1 = self.table['a'] > 5
    pred2 = self.table['b'] > 0
    result = self.table[pred1][pred2]
    expected = self.table.filter([pred1, pred2])
    assert_equal(result, expected)
    # 59, if we are not careful, we can obtain broken refs
    interm = self.table[pred1]
    result = interm.filter([interm['b'] > 0])
    assert_equal(result, expected)
def test_rewrite_expr_with_parent(self):
    """substitute_parents rewrites refs through filters, not new exprs."""
    table = self.con.table('test1')
    table2 = table[table['f'] > 0]
    expr = table2['c'] == 2
    result = L.substitute_parents(expr)
    expected = table['c'] == 2
    assert_equal(result, expected)
    # Substitution not fully possible if we depend on a new expr in a
    # projection
    table4 = table[['c', (table['c'] * 2).name('foo')]]
    expr = table4['c'] == table4['foo']
    result = L.substitute_parents(expr)
    expected = table['c'] == table4['foo']
    assert_equal(result, expected)
def test_rewrite_distinct_but_equal_objects(self):
    """Rewrites treat equal-but-distinct table objects as the same table."""
    t = self.con.table('test1')
    t_copy = self.con.table('test1')
    table2 = t[t_copy['f'] > 0]
    expr = table2['c'] == 2
    result = L.substitute_parents(expr)
    expected = t['c'] == 2
    assert_equal(result, expected)
def test_repr_same_but_distinct_objects(self):
    """repr formats equal-but-distinct table refs as a single table."""
    t = self.con.table('test1')
    t_copy = self.con.table('test1')
    table2 = t[t_copy['f'] > 0]
    result = repr(table2)
    # Only one DatabaseTable node should appear in the formatted output.
    assert result.count('DatabaseTable') == 1
def test_filter_fusion_distinct_table_objects(self):
    """Filter fusion works across distinct-but-equal table objects."""
    t = self.con.table('test1')
    tt = self.con.table('test1')
    # All four mixes of t/tt must fuse to the same two-predicate filter.
    expr = t[t.f > 0][t.c > 0]
    expr2 = t[t.f > 0][tt.c > 0]
    expr3 = t[tt.f > 0][tt.c > 0]
    expr4 = t[tt.f > 0][t.c > 0]
    assert_equal(expr, expr2)
    assert repr(expr) == repr(expr2)
    assert_equal(expr, expr3)
    assert_equal(expr, expr4)
def test_rewrite_substitute_distinct_tables(self):
    """sub_for replaces a subtree even when built from a distinct object."""
    t = self.con.table('test1')
    tt = self.con.table('test1')
    expr = t[t.c > 0]
    expr2 = tt[tt.c > 0]
    metric = t.f.sum().name('metric')
    expr3 = expr.aggregate(metric)
    result = L.sub_for(expr3, [(expr2, t)])
    expected = t.aggregate(metric)
    assert_equal(result, expected)
def test_rewrite_join_projection_without_other_ops(self):
    """Join predicates are lifted to reference unmodified join roots."""
    # Drop out filters and other commutative table operations. Join
    # predicates are "lifted" to reference the base, unmodified join roots
    # Star schema with fact table
    table = self.con.table('star1')
    table2 = self.con.table('star2')
    table3 = self.con.table('star3')
    filtered = table[table['f'] > 0]
    pred1 = table['foo_id'] == table2['foo_id']
    pred2 = filtered['bar_id'] == table3['bar_id']
    j1 = filtered.left_join(table2, [pred1])
    j2 = j1.inner_join(table3, [pred2])
    # Project out the desired fields
    view = j2[[filtered, table2['value1'], table3['value2']]]
    # Construct the thing we expect to obtain
    ex_pred2 = table['bar_id'] == table3['bar_id']
    ex_expr = (table.left_join(table2, [pred1])
               .inner_join(table3, [ex_pred2]))
    rewritten_proj = L.substitute_parents(view)
    op = rewritten_proj.op()
    assert_equal(op.table, ex_expr)
    # Ensure that filtered table has been substituted with the base table
    assert op.selections[0] is table
def test_rewrite_past_projection(self):
    """Rewrites pass through projections unless a name is redefined."""
    table = self.con.table('test1')
    # Rewrite past a projection
    table3 = table[['c', 'f']]
    expr = table3['c'] == 2
    result = L.substitute_parents(expr)
    expected = table['c'] == 2
    assert_equal(result, expected)
    # Unsafe to rewrite past projection
    table5 = table[(table.f * 2).name('c'), table.f]
    expr = table5['c'] == 2
    result = L.substitute_parents(expr)
    # Redefined 'c' blocks substitution: the expr comes back unchanged.
    assert result is expr
def test_projection_predicate_pushdown(self):
    """Filtering a projection pushes the predicate below it (#71)."""
    # Probably test this during the evaluation phase. In SQL, "fusable"
    # table operations will be combined together into a single select
    # statement
    #
    # see ibis #71 for more on this
    t = self.table
    proj = t['a', 'b', 'c']
    # Rewrite a little more aggressively here
    result = proj[t.a > 0]
    # at one point these yielded different results
    filtered = t[t.a > 0]
    expected = filtered[t.a, t.b, t.c]
    expected2 = filtered.projection(['a', 'b', 'c'])
    assert_equal(result, expected)
    assert_equal(result, expected2)
def test_projection_with_join_pushdown_rewrite_refs(self):
    """Predicates on projected join columns rewrite to the base tables."""
    # Observed this expression IR issue in a TopK-rewrite context
    table1 = api.table([
        ('a_key1', 'string'),
        ('a_key2', 'string'),
        ('a_value', 'double')
    ], 'foo')
    table2 = api.table([
        ('b_key1', 'string'),
        ('b_name', 'string'),
        ('b_value', 'double')
    ], 'bar')
    table3 = api.table([
        ('c_key2', 'string'),
        ('c_name', 'string')
    ], 'baz')
    proj = (table1.inner_join(table2, [('a_key1', 'b_key1')])
            .inner_join(table3, [(table1.a_key2, table3.c_key2)])
            [table1, table2.b_name.name('b'), table3.c_name.name('c'),
             table2.b_value])
    # Each (projection-level predicate, base-table predicate) pair must
    # agree after the filter is pushed below the projection.
    cases = [
        (proj.a_value > 0, table1.a_value > 0),
        (proj.b_value > 0, table2.b_value > 0)
    ]
    for higher_pred, lower_pred in cases:
        result = proj.filter([higher_pred])
        op = result.op()
        assert isinstance(op, ops.Projection)
        filter_op = op.table.op()
        assert isinstance(filter_op, ops.Filter)
        new_pred = filter_op.predicates[0]
        assert_equal(new_pred, lower_pred)
def test_column_relabel(self):
    """relabel renames a subset of columns (GH #551)."""
    # Kept high level so we don't presume a projection-based implementation.
    col_types = ['int32', 'string', 'double']
    table = api.table(zip(['foo', 'bar', 'baz'], col_types))
    relabeled = table.relabel({'foo': 'one', 'baz': 'three'})
    expected_schema = api.schema(zip(['one', 'bar', 'three'], col_types))
    assert_equal(relabeled.schema(), expected_schema)
def test_limit(self):
    """limit records both the row count and the offset on its op."""
    limited = self.table.limit(10, offset=5)
    op = limited.op()
    assert op.n == 10
    assert op.offset == 5
def test_sort_by(self):
    """sort_by accepts names, (key, order) tuples, and bare strings."""
    # Commit to some API for ascending and descending
    #
    # table.sort_by(['g', expr1, desc(expr2), desc(expr3)])
    #
    # Default is ascending for anything coercable to an expression,
    # and we'll have ascending/descending wrappers to help.
    result = self.table.sort_by(['f'])
    sort_key = result.op().keys[0].op()
    assert_equal(sort_key.expr, self.table.f)
    assert sort_key.ascending
    # non-list input. per #150
    result2 = self.table.sort_by('f')
    assert_equal(result, result2)
    # False, 'descending', and 0 all spell descending order.
    result2 = self.table.sort_by([('f', False)])
    result3 = self.table.sort_by([('f', 'descending')])
    result4 = self.table.sort_by([('f', 0)])
    key2 = result2.op().keys[0].op()
    key3 = result3.op().keys[0].op()
    key4 = result4.op().keys[0].op()
    assert not key2.ascending
    assert not key3.ascending
    assert not key4.ascending
    assert_equal(result2, result3)
def test_sort_by_desc_deferred_sort_key(self):
    """ibis.desc('name') resolves against the grouped result's columns."""
    result = (self.table.group_by('g')
              .size()
              .sort_by(ibis.desc('count')))
    tmp = self.table.group_by('g').size()
    expected = tmp.sort_by((tmp['count'], False))
    expected2 = tmp.sort_by(ibis.desc(tmp['count']))
    assert_equal(result, expected)
    assert_equal(result, expected2)
def test_slice_convenience(self):
    """Slices translate to limit/offset; unsupported slice forms raise."""
    assert_equal(self.table[:5], self.table.limit(5))
    assert_equal(self.table[:5:1], self.table.limit(5))
    assert_equal(self.table[2:7], self.table.limit(5, offset=2))
    assert_equal(self.table[2:7:1], self.table.limit(5, offset=2))

    # Steps other than 1, open-ended stops, and negative bounds are invalid.
    bad_slices = [slice(2, 15, 2), slice(5, None),
                  slice(None, -5), slice(-10, -5)]
    for bad in bad_slices:
        with self.assertRaises(ValueError):
            self.table[bad]
class TestAggregation(BasicTestCase, unittest.TestCase):
    """Tests for reductions, aggregate(), group_by, and HAVING clauses."""

    def test_count(self):
        """Column count is an Int64 scalar Count op."""
        result = self.table['a'].count()
        assert isinstance(result, api.Int64Scalar)
        assert isinstance(result.op(), ops.Count)

    def test_table_count(self):
        """Table count is an Int64 scalar with default name 'count'."""
        result = self.table.count()
        assert isinstance(result, api.Int64Scalar)
        assert isinstance(result.op(), ops.Count)
        assert result.get_name() == 'count'

    def test_sum_expr_basics(self):
        """Sum result types: int/bool columns -> int64, floats -> double."""
        # Impala gives bigint for all integer types
        ex_class = api.Int64Scalar
        for c in self.int_cols + self.bool_cols:
            result = self.table[c].sum()
            assert isinstance(result, ex_class)
            assert isinstance(result.op(), ops.Sum)
        # Impala gives double for all floating point types
        ex_class = api.DoubleScalar
        for c in self.float_cols:
            result = self.table[c].sum()
            assert isinstance(result, ex_class)
            assert isinstance(result.op(), ops.Sum)

    def test_mean_expr_basics(self):
        """Mean of any numeric/boolean column is a double scalar."""
        cols = self.int_cols + self.float_cols + self.bool_cols
        for c in cols:
            result = self.table[c].mean()
            assert isinstance(result, api.DoubleScalar)
            assert isinstance(result.op(), ops.Mean)

    def test_aggregate_no_keys(self):
        """Aggregation without group keys still yields a TableExpr."""
        agg_exprs = [self.table['a'].sum().name('sum(a)'),
                     self.table['c'].mean().name('mean(c)')]
        # A TableExpr, which in SQL at least will yield a table with a single
        # row
        result = self.table.aggregate(agg_exprs)
        assert isinstance(result, TableExpr)

    def test_aggregate_keys_basic(self):
        """Aggregation with group keys yields a TableExpr that formats."""
        agg_exprs = [self.table['a'].sum().name('sum(a)'),
                     self.table['c'].mean().name('mean(c)')]
        # A TableExpr, which in SQL at least will yield a table with a single
        # row
        result = self.table.aggregate(agg_exprs, by=['g'])
        assert isinstance(result, TableExpr)
        # it works!
        repr(result)

    def test_aggregate_non_list_inputs(self):
        """Scalar metric/by/having inputs are promoted to lists (#150)."""
        # per #150
        metric = self.table.f.sum().name('total')
        by = 'g'
        having = self.table.c.sum() > 10
        result = self.table.aggregate(metric, by=by, having=having)
        expected = self.table.aggregate([metric], by=[by], having=[having])
        assert_equal(result, expected)

    def test_aggregate_keywords(self):
        """Keyword metrics (exprs or callables) name their outputs."""
        t = self.table
        expr = t.aggregate(foo=t.f.sum(), bar=lambda x: x.f.mean(),
                           by='g')
        expr2 = t.group_by('g').aggregate(foo=t.f.sum(),
                                          bar=lambda x: x.f.mean())
        expected = t.aggregate([t.f.mean().name('bar'),
                                t.f.sum().name('foo')], by='g')
        assert_equal(expr, expected)
        assert_equal(expr2, expected)

    def test_summary_expand_list(self):
        """A summary() bundle expands into its component metrics."""
        summ = self.table.f.summary()
        metric = self.table.g.group_concat().name('bar')
        result = self.table.aggregate([metric, summ])
        expected = self.table.aggregate([metric] + summ.exprs())
        assert_equal(result, expected)

    def test_aggregate_invalid(self):
        """Placeholder: non-aggregate/non-scalar metric exprs should fail."""
        # Pass a non-aggregation or non-scalar expr
        pass

    def test_filter_aggregate_pushdown_predicate(self):
        """Filters added after aggregate push down below the aggregation."""
        # In the case where we want to add a predicate to an aggregate
        # expression after the fact, rather than having to backpedal and add it
        # before calling aggregate.
        #
        # TODO (design decision): This could happen automatically when adding a
        # predicate originating from the same root table; if an expression is
        # created from field references from the aggregated table then it
        # becomes a filter predicate applied on top of a view
        pred = self.table.f > 0
        metrics = [self.table.a.sum().name('total')]
        agged = self.table.aggregate(metrics, by=['g'])
        filtered = agged.filter([pred])
        expected = self.table[pred].aggregate(metrics, by=['g'])
        assert_equal(filtered, expected)

    def test_filter_aggregate_partial_pushdown(self):
        """Placeholder: partial pushdown of mixed pre/post predicates."""
        pass

    def test_aggregate_post_predicate(self):
        """HAVING clauses must be boolean reductions, not bare exprs."""
        # Test invalid having clause
        metrics = [self.table.f.sum().name('total')]
        by = ['g']
        invalid_having_cases = [
            self.table.f.sum(),
            self.table.f > 2
        ]
        for case in invalid_having_cases:
            self.assertRaises(com.ExpressionError, self.table.aggregate,
                              metrics, by=by, having=[case])

    def test_group_by_having_api(self):
        """having() composes with group_by (#154)."""
        # #154, add a HAVING post-predicate in a composable way
        metric = self.table.f.sum().name('foo')
        postp = self.table.d.mean() > 1
        expr = (self.table
                .group_by('g')
                .having(postp)
                .aggregate(metric))
        expected = self.table.aggregate(metric, by='g', having=postp)
        assert_equal(expr, expected)

    def test_aggregate_root_table_internal(self):
        """Placeholder: internal root-table validation for aggregations."""
        pass

    def test_compound_aggregate_expr(self):
        """Arithmetic over reductions is itself a valid reduction (#24)."""
        # See ibis #24
        compound_expr = (self.table['a'].sum() /
                         self.table['a'].mean()).name('foo')
        assert ops.is_reduction(compound_expr)
        # Validates internally
        self.table.aggregate([compound_expr])

    def test_groupby_convenience(self):
        """group_by(...).aggregate(...) equals aggregate(by=...)."""
        metrics = [self.table.f.sum().name('total')]
        expr = self.table.group_by('g').aggregate(metrics)
        expected = self.table.aggregate(metrics, by=['g'])
        assert_equal(expr, expected)
        group_expr = self.table.g.cast('double').name('g')
        expr = self.table.group_by(group_expr).aggregate(metrics)
        expected = self.table.aggregate(metrics, by=[group_expr])
        assert_equal(expr, expected)

    def test_group_by_count_size(self):
        """size()/count() conveniences on grouped tables (#148)."""
        # #148, convenience for interactive use, and so forth
        result1 = self.table.group_by('g').size()
        result2 = self.table.group_by('g').count()
        expected = (self.table.group_by('g')
                    .aggregate([self.table.count().name('count')]))
        assert_equal(result1, expected)
        assert_equal(result2, expected)
        result = self.table.group_by('g').count('foo')
        expected = (self.table.group_by('g')
                    .aggregate([self.table.count().name('foo')]))
        assert_equal(result, expected)

    def test_group_by_column_select_api(self):
        """Reductions on a grouped column aggregate with a default name."""
        grouped = self.table.group_by('g')
        result = grouped.f.sum()
        expected = grouped.aggregate(self.table.f.sum().name('sum(f)'))
        assert_equal(result, expected)
        supported_functions = ['sum', 'mean', 'count', 'size', 'max', 'min']
        # make sure they all work
        for fn in supported_functions:
            getattr(grouped.f, fn)()

    def test_value_counts_convenience(self):
        """value_counts is shorthand for group_by + count (#152)."""
        # #152
        result = self.table.g.value_counts()
        expected = (self.table.group_by('g')
                    .aggregate(self.table.count().name('count')))
        assert_equal(result, expected)

    def test_isin_value_counts(self):
        """Smoke test: value_counts over a notin() boolean column (#157)."""
        # #157, this code path was untested before
        bool_clause = self.table.g.notin(['1', '4', '7'])
        # it works!
        bool_clause.name('notin').value_counts()

    def test_value_counts_unnamed_expr(self):
        """Unnamed exprs get the default 'unnamed' label in value_counts."""
        nation = self.con.table('tpch_nation')
        expr = nation.n_name.lower().value_counts()
        expected = nation.n_name.lower().name('unnamed').value_counts()
        assert_equal(expr, expected)

    def test_aggregate_unnamed_expr(self):
        """Grouping by an unnamed derived expr raises ExpressionError."""
        nation = self.con.table('tpch_nation')
        expr = nation.n_name.lower().left(1)
        self.assertRaises(com.ExpressionError, nation.group_by(expr).aggregate,
                          nation.count().name('metric'))

    def test_default_reduction_names(self):
        """Each reduction carries a sensible default output name."""
        d = self.table.f
        cases = [
            (d.count(), 'count'),
            (d.sum(), 'sum'),
            (d.mean(), 'mean'),
            (d.approx_nunique(), 'approx_nunique'),
            (d.approx_median(), 'approx_median'),
            (d.min(), 'min'),
            (d.max(), 'max')
        ]
        for expr, ex_name in cases:
            assert expr.get_name() == ex_name
class TestJoinsUnions(BasicTestCase, unittest.TestCase):
    """Tests for join construction, join rewrites, and union."""

    def test_join_no_predicate_list(self):
        """A single predicate is accepted without wrapping it in a list."""
        region = self.con.table('tpch_region')
        nation = self.con.table('tpch_nation')
        pred = region.r_regionkey == nation.n_regionkey
        joined = region.inner_join(nation, pred)
        expected = region.inner_join(nation, [pred])
        assert_equal(joined, expected)

    def test_equijoin_schema_merge(self):
        """Materialized equijoins concatenate both input schemas."""
        table1 = ibis.table([('key1', 'string'), ('value1', 'double')])
        table2 = ibis.table([('key2', 'string'), ('stuff', 'int32')])
        pred = table1['key1'] == table2['key2']
        join_types = ['inner_join', 'left_join', 'outer_join']
        ex_schema = api.Schema(['key1', 'value1', 'key2', 'stuff'],
                               ['string', 'double', 'string', 'int32'])
        for fname in join_types:
            f = getattr(table1, fname)
            joined = f(table2, [pred]).materialize()
            assert_equal(joined.schema(), ex_schema)

    def test_join_combo_with_projection(self):
        """A projection can disambiguate overlapping column names."""
        # Test a case where there is column name overlap, but the projection
        # passed makes it a non-issue. Highly relevant with self-joins
        #
        # For example, where left/right have some field names in common:
        # SELECT left.*, right.a, right.b
        # FROM left join right on left.key = right.key
        t = self.table
        t2 = t.add_column(t['f'] * 2, 'foo')
        t2 = t2.add_column(t['f'] * 4, 'bar')
        # this works
        joined = t.left_join(t2, [t['g'] == t2['g']])
        proj = joined.projection([t, t2['foo'], t2['bar']])
        repr(proj)

    def test_join_getitem_projection(self):
        """Indexing a join with a table is shorthand for projection."""
        region = self.con.table('tpch_region')
        nation = self.con.table('tpch_nation')
        pred = region.r_regionkey == nation.n_regionkey
        joined = region.inner_join(nation, pred)
        result = joined[nation]
        expected = joined.projection(nation)
        assert_equal(result, expected)

    def test_self_join(self):
        """Self-joins via view() keep left/right references distinct."""
        # Self-joins are problematic with this design because column
        # expressions may reference either the left or right self. For example:
        #
        # SELECT left.key, sum(left.value - right.value) as total_deltas
        # FROM table left
        # INNER JOIN table right
        # ON left.current_period = right.previous_period + 1
        # GROUP BY 1
        #
        # One way around the self-join issue is to force the user to add
        # prefixes to the joined fields, then project using those. Not that
        # satisfying, though.
        left = self.table
        right = self.table.view()
        metric = (left['a'] - right['b']).mean().name('metric')
        joined = left.inner_join(right, [right['g'] == left['g']])
        # basic check there's no referential problems
        result_repr = repr(joined)
        assert 'ref_0' in result_repr
        assert 'ref_1' in result_repr
        # Cannot be immediately materialized because of the schema overlap
        self.assertRaises(RelationError, joined.materialize)
        # Project out left table schema
        proj = joined[[left]]
        assert_equal(proj.schema(), left.schema())
        # Try aggregating on top of joined
        aggregated = joined.aggregate([metric], by=[left['g']])
        ex_schema = api.Schema(['g', 'metric'], ['string', 'double'])
        assert_equal(aggregated.schema(), ex_schema)

    def test_self_join_no_view_convenience(self):
        """Self-joins on column names need no explicit view (#165)."""
        # #165, self joins ought to be possible when the user specifies the
        # column names to join on rather than referentially-valid expressions
        result = self.table.join(self.table, [('g', 'g')])
        t2 = self.table.view()
        expected = self.table.join(t2, self.table.g == t2.g)
        assert_equal(result, expected)

    def test_materialized_join_reference_bug(self):
        """Regression test for GH#403: refs through materialized joins."""
        # GH#403
        orders = self.con.table('tpch_orders')
        customer = self.con.table('tpch_customer')
        lineitem = self.con.table('tpch_lineitem')
        items = (orders
                 .join(lineitem, orders.o_orderkey == lineitem.l_orderkey)
                 [lineitem, orders.o_custkey, orders.o_orderpriority]
                 .join(customer, [('o_custkey', 'c_custkey')])
                 .materialize())
        items['o_orderpriority'].value_counts()

    def test_join_project_after(self):
        """Projections after a join may mix columns from either side."""
        # e.g.
        #
        # SELECT L.foo, L.bar, R.baz, R.qux
        # FROM table1 L
        # INNER JOIN table2 R
        # ON L.key = R.key
        #
        # or
        #
        # SELECT L.*, R.baz
        # ...
        #
        # The default for a join is selecting all fields if possible
        table1 = ibis.table([('key1', 'string'), ('value1', 'double')])
        table2 = ibis.table([('key2', 'string'), ('stuff', 'int32')])
        pred = table1['key1'] == table2['key2']
        joined = table1.left_join(table2, [pred])
        projected = joined.projection([table1, table2['stuff']])
        assert projected.schema().names == ['key1', 'value1', 'stuff']
        projected = joined.projection([table2, table1['key1']])
        assert projected.schema().names == ['key2', 'stuff', 'key1']

    def test_semi_join_schema(self):
        """A left semi join keeps only the left table's schema."""
        # A left semi join discards the schema of the right table
        table1 = ibis.table([('key1', 'string'), ('value1', 'double')])
        table2 = ibis.table([('key2', 'string'), ('stuff', 'double')])
        pred = table1['key1'] == table2['key2']
        semi_joined = table1.semi_join(table2, [pred]).materialize()
        result_schema = semi_joined.schema()
        assert_equal(result_schema, table1.schema())

    def test_cross_join(self):
        """Cross join with scalar aggregates appends their schema."""
        agg_exprs = [self.table['a'].sum().name('sum_a'),
                     self.table['b'].mean().name('mean_b')]
        scalar_aggs = self.table.aggregate(agg_exprs)
        joined = self.table.cross_join(scalar_aggs).materialize()
        agg_schema = api.Schema(['sum_a', 'mean_b'], ['int64', 'double'])
        ex_schema = self.table.schema().append(agg_schema)
        assert_equal(joined.schema(), ex_schema)

    def test_cross_join_multiple(self):
        """ibis.cross_join(a, b, c) nests right-associatively."""
        a = self.table['a', 'b', 'c']
        b = self.table['d', 'e']
        c = self.table['f', 'h']
        joined = ibis.cross_join(a, b, c)
        expected = a.cross_join(b.cross_join(c))
        assert joined.equals(expected)

    def test_join_compound_boolean_predicate(self):
        """Placeholder: logically-composed join predicates."""
        # The user might have composed predicates through logical operations
        pass

    def test_multiple_join_deeper_reference(self):
        """Later join predicates may reference earlier root tables."""
        # Join predicates down the chain might reference one or more root
        # tables in the hierarchy.
        table1 = ibis.table({'key1': 'string', 'key2': 'string',
                             'value1': 'double'})
        table2 = ibis.table({'key3': 'string', 'value2': 'double'})
        table3 = ibis.table({'key4': 'string', 'value3': 'double'})
        joined = table1.inner_join(table2, [table1['key1'] == table2['key3']])
        joined2 = joined.inner_join(table3, [table1['key2'] == table3['key4']])
        # it works, what more should we test here?
        materialized = joined2.materialize()
        repr(materialized)

    def test_filter_join_unmaterialized(self):
        """Unmaterialized joins can be filtered on either input table."""
        table1 = ibis.table({'key1': 'string', 'key2': 'string',
                             'value1': 'double'})
        table2 = ibis.table({'key3': 'string', 'value2': 'double'})
        # It works!
        joined = table1.inner_join(table2, [table1['key1'] == table2['key3']])
        filtered = joined.filter([table1.value1 > 0])
        repr(filtered)

    def test_filter_on_projected_field(self):
        """Filters on just-created aliases stay above the projection (#173)."""
        # See #173. Impala and other SQL engines do not allow filtering on a
        # just-created alias in a projection
        region = self.con.table('tpch_region')
        nation = self.con.table('tpch_nation')
        customer = self.con.table('tpch_customer')
        orders = self.con.table('tpch_orders')
        fields_of_interest = [customer,
                              region.r_name.name('region'),
                              orders.o_totalprice.name('amount'),
                              orders.o_orderdate
                              .cast('timestamp').name('odate')]
        all_join = (
            region.join(nation, region.r_regionkey == nation.n_regionkey)
            .join(customer, customer.c_nationkey == nation.n_nationkey)
            .join(orders, orders.o_custkey == customer.c_custkey))
        tpch = all_join[fields_of_interest]
        # Correlated subquery, yikes!
        t2 = tpch.view()
        conditional_avg = t2[(t2.region == tpch.region)].amount.mean()
        # `amount` is part of the projection above as an aliased field
        amount_filter = tpch.amount > conditional_avg
        result = tpch.filter([amount_filter])
        # Now then! Predicate pushdown here is inappropriate, so we check that
        # it didn't occur.
        # If filter were pushed below projection, the top-level operator type
        # would be Projection instead.
        assert type(result.op()) == ops.Filter

    def test_join_can_rewrite_errant_predicate(self):
        """Predicates referencing a derived table are rewritten (#74)."""
        # Join predicate references a derived table, but we can salvage and
        # rewrite it to get the join semantics out
        # see ibis #74
        table = ibis.table([
            ('c', 'int32'),
            ('f', 'double'),
            ('g', 'string')
        ], 'foo_table')
        table2 = ibis.table([
            ('key', 'string'),
            ('value', 'double')
        ], 'bar_table')
        filter_pred = table['f'] > 0
        table3 = table[filter_pred]
        result = table.inner_join(table2, [table3['g'] == table2['key']])
        expected = table.inner_join(table2, [table['g'] == table2['key']])
        assert_equal(result, expected)

    def test_non_equijoins(self):
        """Placeholder: non-equijoin predicates belong in WHERE (#107)."""
        # Move non-equijoin predicates to WHERE during SQL translation if
        # possible, per #107
        pass

    def test_join_overlapping_column_names(self):
        """Placeholder: overlapping column-name handling in joins."""
        pass

    def test_join_key_alternatives(self):
        """Join keys may be name tuples, expr tuples, or a single expr."""
        t1 = self.con.table('star1')
        t2 = self.con.table('star2')
        # Join with tuples
        joined = t1.inner_join(t2, [('foo_id', 'foo_id')])
        joined2 = t1.inner_join(t2, [(t1.foo_id, t2.foo_id)])
        # Join with single expr
        joined3 = t1.inner_join(t2, t1.foo_id == t2.foo_id)
        expected = t1.inner_join(t2, [t1.foo_id == t2.foo_id])
        assert_equal(joined, expected)
        assert_equal(joined2, expected)
        assert_equal(joined3, expected)
        # A 3-tuple is not a valid join key spec.
        self.assertRaises(com.ExpressionError, t1.inner_join, t2,
                          [('foo_id', 'foo_id', 'foo_id')])

    def test_join_invalid_refs(self):
        """Predicates referencing an uninvolved table raise RelationError."""
        t1 = self.con.table('star1')
        t2 = self.con.table('star2')
        t3 = self.con.table('star3')
        predicate = t1.bar_id == t3.bar_id
        self.assertRaises(com.RelationError, t1.inner_join, t2, [predicate])

    def test_join_non_boolean_expr(self):
        """Non-boolean join predicates raise ExpressionError."""
        t1 = self.con.table('star1')
        t2 = self.con.table('star2')
        # oops
        predicate = t1.f * t2.value1
        self.assertRaises(com.ExpressionError, t1.inner_join, t2, [predicate])

    def test_unravel_compound_equijoin(self):
        """AND-ed equality predicates unravel into a predicate list."""
        t1 = ibis.table([
            ('key1', 'string'),
            ('key2', 'string'),
            ('key3', 'string'),
            ('value1', 'double')
        ], 'foo_table')
        t2 = ibis.table([
            ('key1', 'string'),
            ('key2', 'string'),
            ('key3', 'string'),
            ('value2', 'double')
        ], 'bar_table')
        p1 = t1.key1 == t2.key1
        p2 = t1.key2 == t2.key2
        p3 = t1.key3 == t2.key3
        joined = t1.inner_join(t2, [p1 & p2 & p3])
        expected = t1.inner_join(t2, [p1, p2, p3])
        assert_equal(joined, expected)

    def test_join_add_prefixes(self):
        """Placeholder: prefixing join output columns."""
        pass

    def test_join_nontrivial_exprs(self):
        """Placeholder: joins on derived (non-column) expressions."""
        pass

    def test_union(self):
        """union requires identical schemas; the distinct flag is recorded."""
        schema1 = [
            ('key', 'string'),
            ('value', 'double')
        ]
        schema2 = [
            ('key', 'string'),
            ('key2', 'string'),
            ('value', 'double')
        ]
        t1 = ibis.table(schema1, 'foo')
        t2 = ibis.table(schema1, 'bar')
        t3 = ibis.table(schema2, 'baz')
        result = t1.union(t2)
        assert isinstance(result.op(), ops.Union)
        assert not result.op().distinct
        result = t1.union(t2, distinct=True)
        assert isinstance(result.op(), ops.Union)
        assert result.op().distinct
        # Mismatched schemas cannot be unioned.
        self.assertRaises(ir.RelationError, t1.union, t3)

    def test_column_ref_on_projection_rename(self):
        """Aggregating by a renamed projected column works end to end."""
        region = self.con.table('tpch_region')
        nation = self.con.table('tpch_nation')
        customer = self.con.table('tpch_customer')
        joined = (region.inner_join(
            nation, [region.r_regionkey == nation.n_regionkey])
            .inner_join(
                customer, [customer.c_nationkey == nation.n_nationkey]))
        proj_exprs = [customer, nation.n_name.name('nation'),
                      region.r_name.name('region')]
        joined = joined.projection(proj_exprs)
        metrics = [joined.c_acctbal.sum().name('metric')]
        # it works!
        joined.aggregate(metrics, by=['region'])
class TestSemiAntiJoinPredicates(unittest.TestCase):
    """Tests for existence (ANY / NOT ANY) predicates between two tables."""

    def setUp(self):
        # Two tables sharing key columns so cross-table predicates are valid.
        self.con = MockConnection()
        self.t1 = ibis.table([
            ('key1', 'string'),
            ('key2', 'string'),
            ('value1', 'double')
        ], 'foo')
        self.t2 = ibis.table([
            ('key1', 'string'),
            ('key2', 'string')
        ], 'bar')

    def test_simple_existence_predicate(self):
        """(a == b).any() builds an Any boolean usable as a filter."""
        cond = (self.t1.key1 == self.t2.key1).any()
        assert isinstance(cond, ir.BooleanArray)
        op = cond.op()
        assert isinstance(op, ops.Any)
        # it works!
        expr = self.t1[cond]
        assert isinstance(expr.op(), ops.Filter)

    def test_cannot_use_existence_expression_in_join(self):
        """Placeholder: join predicates must be plain comparisons."""
        # Join predicates must consist only of comparisons
        pass

    def test_not_exists_predicate(self):
        """Negating an Any produces a NotAny op."""
        cond = -(self.t1.key1 == self.t2.key1).any()
        assert isinstance(cond.op(), ops.NotAny)
class TestLateBindingFunctions(BasicTestCase, unittest.TestCase):
    """Tests that callables are accepted wherever exprs are (late binding)."""

    def test_aggregate_metrics(self):
        """Metric callables are resolved against the aggregated table."""
        functions = [lambda x: x.e.sum().name('esum'),
                     lambda x: x.f.sum().name('fsum')]
        exprs = [self.table.e.sum().name('esum'),
                 self.table.f.sum().name('fsum')]
        result = self.table.aggregate(functions[0])
        expected = self.table.aggregate(exprs[0])
        assert_equal(result, expected)
        result = self.table.aggregate(functions)
        expected = self.table.aggregate(exprs)
        assert_equal(result, expected)

    def test_group_by_keys(self):
        """Group keys may be given as callables."""
        m = self.table.mutate(foo=self.table.f * 2,
                              bar=self.table.e / 2)
        expr = m.group_by(lambda x: x.foo).size()
        expected = m.group_by('foo').size()
        assert_equal(expr, expected)
        expr = m.group_by([lambda x: x.foo, lambda x: x.bar]).size()
        expected = m.group_by(['foo', 'bar']).size()
        assert_equal(expr, expected)

    def test_having(self):
        """HAVING clauses may be given as callables."""
        m = self.table.mutate(foo=self.table.f * 2,
                              bar=self.table.e / 2)
        expr = (m.group_by('foo')
                .having(lambda x: x.foo.sum() > 10)
                .size())
        expected = (m.group_by('foo')
                    .having(m.foo.sum() > 10)
                    .size())
        assert_equal(expr, expected)

    def test_filter(self):
        """Filter predicates may be callables, singly or in lists."""
        m = self.table.mutate(foo=self.table.f * 2,
                              bar=self.table.e / 2)
        result = m.filter(lambda x: x.foo > 10)
        result2 = m[lambda x: x.foo > 10]
        expected = m[m.foo > 10]
        assert_equal(result, expected)
        assert_equal(result2, expected)
        result = m.filter([lambda x: x.foo > 10,
                           lambda x: x.bar < 0])
        expected = m.filter([m.foo > 10, m.bar < 0])
        assert_equal(result, expected)

    def test_sort_by(self):
        """Sort keys may be callables, including inside ibis.desc."""
        m = self.table.mutate(foo=self.table.e + self.table.f)
        result = m.sort_by(lambda x: -x.foo)
        expected = m.sort_by(-m.foo)
        assert_equal(result, expected)
        result = m.sort_by(lambda x: ibis.desc(x.foo))
        expected = m.sort_by(ibis.desc('foo'))
        assert_equal(result, expected)
        result = m.sort_by(ibis.desc(lambda x: x.foo))
        expected = m.sort_by(ibis.desc('foo'))
        assert_equal(result, expected)

    def test_projection(self):
        """Projection lists may mix callables and column names."""
        m = self.table.mutate(foo=self.table.f * 2)

        def f(x):
            # Late-bound helper: doubles the derived 'foo' column.
            return (x.foo * 2).name('bar')

        result = m.projection([f, 'f'])
        result2 = m[f, 'f']
        expected = m.projection([f(m), 'f'])
        assert_equal(result, expected)
        assert_equal(result2, expected)

    def test_mutate(self):
        """Chained mutates resolve callables against each new table."""
        m = self.table.mutate(foo=self.table.f * 2)

        def g(x):
            return x.foo * 2

        def h(x):
            return x.bar * 2

        result = m.mutate(bar=g).mutate(baz=h)
        m2 = m.mutate(bar=g(m))
        expected = m2.mutate(baz=h(m2))
        assert_equal(result, expected)

    def test_add_column(self):
        """add_column accepts a callable plus an explicit name."""
        def g(x):
            return x.f * 2

        result = self.table.add_column(g, name='foo')
        expected = self.table.mutate(foo=g)
        assert_equal(result, expected)

    def test_groupby_mutate(self):
        """Grouped mutate resolves callables for analytic functions."""
        t = self.table
        g = t.group_by('g').order_by('f')
        expr = g.mutate(foo=lambda x: x.f.lag(),
                        bar=lambda x: x.f.rank())
        expected = g.mutate(foo=t.f.lag(),
                            bar=t.f.rank())
        assert_equal(expr, expected)

    def test_groupby_projection(self):
        """Grouped projection resolves callables for analytic functions."""
        t = self.table
        g = t.group_by('g').order_by('f')
        expr = g.projection([lambda x: x.f.lag().name('foo'),
                             lambda x: x.f.rank().name('bar')])
        expected = g.projection([t.f.lag().name('foo'),
                                 t.f.rank().name('bar')])
        assert_equal(expr, expected)

    def test_set_column(self):
        """set_column accepts a callable for the replacement expr."""
        def g(x):
            return x.f * 2

        result = self.table.set_column('f', g)
        expected = self.table.set_column('f', self.table.f * 2)
        assert_equal(result, expected)
| korotkyn/ibis | ibis/expr/tests/test_table.py | Python | apache-2.0 | 45,588 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
for gv in orm['video.glossvideo'].objects.all():
try:
gloss = orm['dictionary.gloss'].objects.get(sn=gv.gloss_sn)
gv.gloss = gloss
gv.save()
except:
print "Can't find gloss for ", gv.gloss_sn
def backwards(self, orm):
"Write your backwards methods here."
# we'll copy over the gloss.sn to gloss_sn if present
for gv in orm['video.glossvideo'].objects.all():
if gv.gloss.sn != None:
gv.gloss_sn = gv.gloss.sn
gv.save()
models = {
u'dictionary.dialect': {
'Meta': {'ordering': "['language', 'name']", 'object_name': 'Dialect'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dictionary.Language']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'dictionary.gloss': {
'Meta': {'ordering': "['idgloss']", 'object_name': 'Gloss'},
'StemSN': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'annotation_idgloss': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'aslgloss': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'asloantf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'asltf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'blend': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'blendtf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'bslgloss': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'bslloantf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'bsltf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'compound': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'comptf': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'dialect': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dictionary.Dialect']", 'symmetrical': 'False'}),
'domhndsh': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'final_domhndsh': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'final_loc': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'final_palm_orientation': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'final_relative_orientation': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'final_secondary_loc': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'final_subhndsh': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idgloss': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'inWeb': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'initial_palm_orientation': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'initial_relative_orientation': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'initial_secondary_loc': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'inittext': ('django.db.models.fields.CharField', [], {'max_length': "'50'", 'blank': 'True'}),
'isNew': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dictionary.Language']", 'symmetrical': 'False'}),
'locprim': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'locsecond': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'morph': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sedefinetf': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'segloss': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'sense': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sn': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'subhndsh': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'})
},
u'dictionary.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'video.glossvideo': {
'Meta': {'object_name': 'GlossVideo'},
'gloss': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dictionary.Gloss']"}),
'gloss_sn': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'videofile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
u'video.video': {
'Meta': {'object_name': 'Video'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'videofile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['video']
symmetrical = True
| Signbank/Auslan-signbank | signbank/video/migrations/0006_copy_gloss_sn.py | Python | bsd-3-clause | 7,181 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.vision_v1.types import image_annotator
from google.longrunning import operations_pb2 # type: ignore
from .base import ImageAnnotatorTransport, DEFAULT_CLIENT_INFO
class ImageAnnotatorGrpcTransport(ImageAnnotatorTransport):
    """gRPC backend transport for ImageAnnotator.
    Service that performs Google Cloud Vision API detection tasks
    over client images, such as face, landmark, logo, label, and
    text detection. The ImageAnnotator service returns detected
    entities from the images.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of wrapped RPC callables, keyed by RPC method name; populated
    # lazily by the stub properties below.
    _stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "vision.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsClient] = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            # NOTE(review): ``False`` (rather than ``None``) appears intended
            # to stop the base transport from resolving default credentials —
            # confirm against the base class.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(
        cls,
        host: str = "vision.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.
        This property caches on the instance; repeated calls return the same
        client.
        """
        # Quick check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
        # Return the client from cache.
        return self._operations_client
    @property
    def batch_annotate_images(
        self,
    ) -> Callable[
        [image_annotator.BatchAnnotateImagesRequest],
        image_annotator.BatchAnnotateImagesResponse,
    ]:
        r"""Return a callable for the batch annotate images method over gRPC.
        Run image detection and annotation for a batch of
        images.
        Returns:
            Callable[[~.BatchAnnotateImagesRequest],
                    ~.BatchAnnotateImagesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "batch_annotate_images" not in self._stubs:
            self._stubs["batch_annotate_images"] = self.grpc_channel.unary_unary(
                "/google.cloud.vision.v1.ImageAnnotator/BatchAnnotateImages",
                request_serializer=image_annotator.BatchAnnotateImagesRequest.serialize,
                response_deserializer=image_annotator.BatchAnnotateImagesResponse.deserialize,
            )
        return self._stubs["batch_annotate_images"]
    @property
    def batch_annotate_files(
        self,
    ) -> Callable[
        [image_annotator.BatchAnnotateFilesRequest],
        image_annotator.BatchAnnotateFilesResponse,
    ]:
        r"""Return a callable for the batch annotate files method over gRPC.
        Service that performs image detection and annotation
        for a batch of files. Now only "application/pdf",
        "image/tiff" and "image/gif" are supported.
        This service will extract at most 5 (customers can
        specify which 5 in AnnotateFileRequest.pages) frames
        (gif) or pages (pdf or tiff) from each file provided and
        perform detection and annotation for each image
        extracted.
        Returns:
            Callable[[~.BatchAnnotateFilesRequest],
                    ~.BatchAnnotateFilesResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "batch_annotate_files" not in self._stubs:
            self._stubs["batch_annotate_files"] = self.grpc_channel.unary_unary(
                "/google.cloud.vision.v1.ImageAnnotator/BatchAnnotateFiles",
                request_serializer=image_annotator.BatchAnnotateFilesRequest.serialize,
                response_deserializer=image_annotator.BatchAnnotateFilesResponse.deserialize,
            )
        return self._stubs["batch_annotate_files"]
    @property
    def async_batch_annotate_images(
        self,
    ) -> Callable[
        [image_annotator.AsyncBatchAnnotateImagesRequest], operations_pb2.Operation
    ]:
        r"""Return a callable for the async batch annotate images method over gRPC.
        Run asynchronous image detection and annotation for a list of
        images.
        Progress and results can be retrieved through the
        ``google.longrunning.Operations`` interface.
        ``Operation.metadata`` contains ``OperationMetadata``
        (metadata). ``Operation.response`` contains
        ``AsyncBatchAnnotateImagesResponse`` (results).
        This service will write image annotation outputs to json files
        in customer GCS bucket, each json file containing
        BatchAnnotateImagesResponse proto.
        Returns:
            Callable[[~.AsyncBatchAnnotateImagesRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "async_batch_annotate_images" not in self._stubs:
            self._stubs["async_batch_annotate_images"] = self.grpc_channel.unary_unary(
                "/google.cloud.vision.v1.ImageAnnotator/AsyncBatchAnnotateImages",
                request_serializer=image_annotator.AsyncBatchAnnotateImagesRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["async_batch_annotate_images"]
    @property
    def async_batch_annotate_files(
        self,
    ) -> Callable[
        [image_annotator.AsyncBatchAnnotateFilesRequest], operations_pb2.Operation
    ]:
        r"""Return a callable for the async batch annotate files method over gRPC.
        Run asynchronous image detection and annotation for a list of
        generic files, such as PDF files, which may contain multiple
        pages and multiple images per page. Progress and results can be
        retrieved through the ``google.longrunning.Operations``
        interface. ``Operation.metadata`` contains ``OperationMetadata``
        (metadata). ``Operation.response`` contains
        ``AsyncBatchAnnotateFilesResponse`` (results).
        Returns:
            Callable[[~.AsyncBatchAnnotateFilesRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "async_batch_annotate_files" not in self._stubs:
            self._stubs["async_batch_annotate_files"] = self.grpc_channel.unary_unary(
                "/google.cloud.vision.v1.ImageAnnotator/AsyncBatchAnnotateFiles",
                request_serializer=image_annotator.AsyncBatchAnnotateFilesRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["async_batch_annotate_files"]
    def close(self):
        """Release transport resources by closing the underlying gRPC channel."""
        self.grpc_channel.close()
# Public API of this module.
__all__ = ("ImageAnnotatorGrpcTransport",)
| googleapis/python-vision | google/cloud/vision_v1/services/image_annotator/transports/grpc.py | Python | apache-2.0 | 17,525 |
# -*- coding: utf-8 -*-
'''
Open Facebook allows you to use Facebook's open graph API with simple python code
**Features**
* Supported and maintained
* Tested so people can contribute
* Facebook exceptions are mapped
* Logging
**Basic examples**::
facebook = OpenFacebook(access_token)
# Getting info about me
facebook.get('me')
# Learning some more about fashiolista
facebook.get('fashiolista')
# Writing your first comment
facebook.set('fashiolista/comments', message='I love Fashiolista!')
# Posting to a users wall
facebook.set('me/feed', message='check out fashiolista',
url='http://www.fashiolista.com')
# Liking a page
facebook.set('fashiolista/likes')
# Getting who likes cocacola
facebook.set('cocacola/likes')
# Use fql to retrieve your name
facebook.fql('SELECT name FROM user WHERE uid = me()')
# Executing fql in batch
facebook.batch_fql([
'SELECT uid, name, pic_square FROM user WHERE uid = me()',
'SELECT uid, rsvp_status FROM event_member WHERE eid=12345678',
])
# Uploading pictures
photo_urls = [
'http://e.fashiocdn.com/images/entities/0/7/B/I/9/0.365x365.jpg',
'http://e.fashiocdn.com/images/entities/0/5/e/e/r/0.365x365.jpg',
]
for photo in photo_urls:
print facebook.set('me/feed', message='Check out Fashiolista',
picture=photo, url='http://www.fashiolista.com')
**Getting an access token**
Once you get your access token, Open Facebook gives you access to the Facebook API
There are 3 ways of getting a facebook access_token and these are currently
implemented by Django Facebook.
1. code is passed as request parameter and traded for an
access_token using the api
2. code is passed through a signed cookie and traded for an access_token
3. access_token is passed directly (retrieved through javascript, which
would be bad security, or through one of the mobile flows.)
If you are looking to develop your own flow for a different framework have a look at
Facebook's documentation:
http://developers.facebook.com/docs/authentication/
Also have a look at the :class:`.FacebookRequired` decorator and :func:`get_persistent_graph` function to
understand the required functionality
**Api docs**:
'''
from django.http import QueryDict
from django.utils import six
from django.utils.http import urlencode
from django_facebook import settings as facebook_settings
from open_facebook import exceptions as facebook_exceptions
from open_facebook.utils import json, encode_params, send_warning, memoized, \
stop_statsd, start_statsd
import logging
from django_facebook.utils import to_int
import ssl
import re
try:
# python 2 imports
from urlparse import urlparse
from urllib2 import build_opener, HTTPError, URLError
except ImportError:
# python 3 imports
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse
from urllib.request import build_opener
# Module-level logger used throughout the Facebook connection classes.
logger = logging.getLogger(__name__)
# base timeout in seconds; the effective timeout grows on each retry
REQUEST_TIMEOUT = 10
# two retries was too little, sometimes facebook is a bit flaky
REQUEST_ATTEMPTS = 3
class FacebookConnection(object):
    '''
    Shared utility class implementing the sending of requests to the
    Facebook API and the parsing of its responses into python data
    (or mapped exceptions).
    '''
    api_url = 'https://graph.facebook.com/'
    # this older url is still used for fql requests
    old_api_url = 'https://api.facebook.com/method/'

    @classmethod
    def request(cls, path='', post_data=None, old_api=False, **params):
        '''
        Main function for sending the request to facebook

        **Example**::

            FacebookConnection.request('me')

        :param path:
            The path to request, examples: /me/friends/, /me/likes/
        :param post_data:
            A dictionary of data to post
        :param old_api:
            Whether to use the legacy api.facebook.com endpoint (fql)
        :param params:
            The get params to include
        '''
        api_base_url = cls.old_api_url if old_api else cls.api_url
        # subclasses may carry an access_token; attach it when present
        if getattr(cls, 'access_token', None):
            params['access_token'] = cls.access_token
        url = '%s%s?%s' % (api_base_url, path, urlencode(params))
        response = cls._request(url, post_data)
        return response

    @classmethod
    def _request(cls, url, post_data=None, timeout=REQUEST_TIMEOUT,
                 attempts=REQUEST_ATTEMPTS):
        '''
        Perform the actual HTTP exchange with Facebook, retrying transient
        failures with a growing timeout and converting error payloads into
        the appropriate :mod:`open_facebook.exceptions` classes.

        :param url: the fully composed url (including get params)
        :param post_data: dict of data to POST, or None for a GET
        :param timeout: base socket timeout; multiplied per attempt
        :param attempts: how many tries before raising the failure
        '''
        # change fb__explicitly_shared to fb:explicitly_shared
        if post_data:
            post_data = dict(
                (k.replace('__', ':'), v) for k, v in post_data.items())
        logger.info('requesting url %s with post data %s', url, post_data)
        post_request = (post_data is not None or 'method=post' in url)
        if post_request and facebook_settings.FACEBOOK_READ_ONLY:
            # never write to Facebook when running in readonly mode
            logger.info('running in readonly mode')
            response = dict(id=123456789, setting_read_only=True)
            return response
        # nicely identify ourselves before sending the request
        opener = build_opener()
        opener.addheaders = [('User-agent', 'Open Facebook Python')]
        # get the statsd path to track response times with
        path = urlparse(url).path
        statsd_path = path.replace('.', '_')
        # give it a few shots, connection is buggy at times
        timeout_mp = 0
        while attempts:
            # gradually increase the timeout upon failure
            timeout_mp += 1
            extended_timeout = timeout * timeout_mp
            response_file = None
            encoded_params = encode_params(post_data) if post_data else None
            post_string = (urlencode(encoded_params)
                           if post_data else None)
            try:
                start_statsd('facebook.%s' % statsd_path)
                try:
                    response_file = opener.open(
                        url, post_string, timeout=extended_timeout)
                    response = response_file.read().decode('utf8')
                except HTTPError as e:
                    response_file = e
                    response = response_file.read().decode('utf8')
                    # Facebook sends error codes for many of their flows
                    # we still want the json to allow for proper handling
                    msg_format = 'FB request, error type %s, code %s'
                    logger.warning(msg_format, type(e), getattr(e, 'code', None))
                    # detect if its a server or application error
                    server_error = cls.is_server_error(e, response)
                    if server_error:
                        # trigger a retry
                        raise URLError(
                            'Facebook is down %s' % response)
                break
            except (HTTPError, URLError, ssl.SSLError) as e:
                # These are often temporary errors, so we will retry before
                # failing
                error_format = 'Facebook encountered a timeout (%ss) or error %s'
                logger.warning(error_format, extended_timeout, str(e))
                attempts -= 1
                if not attempts:
                    # if we have no more attempts actually raise the error
                    error_instance = facebook_exceptions.convert_unreachable_exception(
                        e)
                    error_msg = 'Facebook request failed after several retries, raising error %s'
                    logger.warning(error_msg, error_instance)
                    raise error_instance
            finally:
                if response_file:
                    response_file.close()
                stop_statsd('facebook.%s' % statsd_path)
        # Facebook's response is either
        # * valid json
        # * a string which is a querydict (a=b&c=d...etc)
        # * a html page stating FB is having trouble (but that shouldnt reach
        #   this part of the code)
        try:
            parsed_response = json.loads(response)
            logger.info('facebook send response %s' % parsed_response)
        except Exception:
            # using exception because we need to support multiple json libs :S
            parsed_response = QueryDict(response, True)
            logger.info('facebook send response %s' % parsed_response)
        if parsed_response and isinstance(parsed_response, dict):
            # of course we have two different error syntaxes to support
            if parsed_response.get('error'):
                cls.raise_error(parsed_response['error']['type'],
                                parsed_response['error']['message'],
                                parsed_response['error'].get('code'))
            elif parsed_response.get('error_code'):
                cls.raise_error(parsed_response['error_code'],
                                parsed_response['error_msg'])
        return parsed_response

    @classmethod
    def is_server_error(cls, e, response):
        '''
        Checks an HTTPError to see if Facebook is down or we are using the
        API in the wrong way
        Facebook doesn't clearly distinquish between the two, so this is a bit
        of a hack

        :param e: the HTTPError (or similar) that was raised
        :param response: the decoded response body
        :returns: bool, True when this looks like a Facebook outage
        '''
        from open_facebook.utils import is_json
        server_error = False
        if hasattr(e, 'code') and e.code == 500:
            server_error = True
        # Facebook status codes are used for application logic
        # http://fbdevwiki.com/wiki/Error_codes#User_Permission_Errors
        # The only way I know to detect an actual server error is to check if
        # it looks like their error page
        # TODO: think of a better solution....
        error_matchers = [
            '<title>Facebook | Error</title>',
            'Sorry, something went wrong.'
        ]
        is_error_page = all(
            [matcher in response for matcher in error_matchers])
        if is_error_page:
            server_error = True
        # if it looks like json, facebook is probably not down
        if is_json(response):
            server_error = False
        return server_error

    @classmethod
    def raise_error(cls, error_type, message, error_code=None):
        '''
        Lookup the best error class for the error and raise it

        **Example**::

            FacebookConnection.raise_error(10, 'OAuthException')

        :param error_type:
            the error type from the facebook api call
        :param message:
            the error message from the facebook api call
        :param error_code:
            optionally the error code which facebook send
        '''
        default_error_class = facebook_exceptions.OpenFacebookException
        # get the error code
        error_code = error_code or cls.get_code_from_message(message)
        # also see http://fbdevwiki.com/wiki/Error_codes#User_Permission_Errors
        logger.info('Trying to match error code %s to error class', error_code)
        # lookup by error code takes precedence
        error_class = cls.match_error_code(error_code)
        # try to get error class by direct lookup
        if not error_class:
            if not isinstance(error_type, int):
                error_class = getattr(facebook_exceptions, error_type, None)
                if error_class and not issubclass(error_class, default_error_class):
                    error_class = None
        # hack for missing parameters
        if 'Missing' in message and 'parameter' in message:
            error_class = facebook_exceptions.MissingParameter
        # hack for Unsupported delete request
        if 'Unsupported delete request' in message:
            error_class = facebook_exceptions.UnsupportedDeleteRequest
        # fallback to the default
        if not error_class:
            error_class = default_error_class
        logger.info('Matched error to class %s', error_class)
        error_message = message
        if error_code:
            # this is handy when adding new exceptions for facebook errors
            error_message = u'%s (error code %s)' % (message, error_code)
        raise error_class(error_message)

    @classmethod
    def get_code_from_message(cls, message):
        '''
        Extract the numeric error code, e.g. 42 from "(#42) oops",
        returning None when the message carries no code.
        '''
        # map error classes to facebook error codes
        # find the error code
        error_code = None
        # raw string: '\(' and '\d' are regex escapes, not string escapes
        error_code_re = re.compile(r'\(#(\d+)\)')
        matches = error_code_re.match(message)
        matching_groups = matches.groups() if matches else None
        if matching_groups:
            error_code = to_int(matching_groups[0]) or None
        return error_code

    @classmethod
    def get_sorted_exceptions(cls):
        '''Return all open_facebook exception classes ordered by code range.'''
        from open_facebook.exceptions import get_exception_classes
        exception_classes = get_exception_classes()
        exception_classes.sort(key=lambda e: e.range())
        return exception_classes

    @classmethod
    def match_error_code(cls, error_code):
        '''
        Return the right exception class for the error code,
        or None when no class claims it.
        '''
        exception_classes = cls.get_sorted_exceptions()
        error_class = None
        for class_ in exception_classes:
            codes_list = class_.codes_list()
            # match the error class
            matching_error_class = None
            for code in codes_list:
                if isinstance(code, tuple):
                    start, stop = code
                    if error_code and start <= error_code <= stop:
                        matching_error_class = class_
                        logger.info('Matched error on code %s', code)
                elif isinstance(code, (int, six.integer_types)):
                    if int(code) == error_code:
                        matching_error_class = class_
                        logger.info('Matched error on code %s', code)
                else:
                    # was ``raise(ValueError, msg)`` which raises a tuple and
                    # is a TypeError on python 3; raise a proper instance
                    raise ValueError(
                        'Dont know how to handle %s of '
                        'type %s' % (code, type(code)))
            # tell about the happy news if we found something
            if matching_error_class:
                error_class = matching_error_class
                break
        return error_class
class FacebookAuthorization(FacebookConnection):
    '''
    Methods for getting us an access token
    There are several flows we must support
    * js authentication flow (signed cookie)
    * facebook app authentication flow (signed cookie)
    * facebook oauth redirect (code param in url)
    These 3 options need to be converted to an access token
    Also handles several testing scenarios
    * get app access token
    * create test user
    * get_or_create_test_user
    '''

    @classmethod
    def convert_code(cls, code,
                     redirect_uri='http://local.mellowmorning.com:8000/facebook/connect/'):
        '''
        Turns a code into an access token

        **Example**::

            FacebookAuthorization.convert_code(code)

        :param code:
            The code to convert
        :param redirect_uri:
            The redirect uri with which the code was requested
        :returns: dict
        '''
        kwargs = cls._client_info()
        kwargs['code'] = code
        kwargs['redirect_uri'] = redirect_uri
        response = cls.request('oauth/access_token', **kwargs)
        return response

    @classmethod
    def extend_access_token(cls, access_token):
        '''
        https://developers.facebook.com/roadmap/offline-access-removal/
        We can extend the token only once per day
        Normal short lived tokens last 1-2 hours
        Long lived tokens (given by extending) last 60 days

        **Example**::

            FacebookAuthorization.extend_access_token(access_token)

        :param access_token:
            The access_token to extend
        :returns: dict
        '''
        kwargs = cls._client_info()
        kwargs['grant_type'] = 'fb_exchange_token'
        kwargs['fb_exchange_token'] = access_token
        response = cls.request('oauth/access_token', **kwargs)
        return response

    @classmethod
    def _client_info(cls):
        # shared client_id/client_secret payload used by the oauth endpoints
        kwargs = dict(client_id=facebook_settings.FACEBOOK_APP_ID)
        kwargs['client_secret'] = facebook_settings.FACEBOOK_APP_SECRET
        return kwargs

    @classmethod
    def parse_signed_data(cls, signed_request,
                          secret=facebook_settings.FACEBOOK_APP_SECRET):
        '''
        Verify and decode a Facebook ``signed_request`` parameter.

        Returns the decoded payload dict on success, or None when the
        algorithm is unsupported or the signature does not match.

        Thanks to
        http://stackoverflow.com/questions/3302946/how-to-base64-url-decode-in-python
        and
        http://sunilarora.org/parsing-signedrequest-parameter-in-python-bas
        '''
        # NOTE(review): the default for ``secret`` is evaluated once at import
        # time; changing the setting afterwards will not change this default.
        from open_facebook.utils import base64_url_decode_php_style, smart_str
        # signed_request is "<base64url signature>.<base64url payload>"
        l = signed_request.split('.', 2)
        encoded_sig = l[0]
        payload = l[1]
        from open_facebook.utils import json
        sig = base64_url_decode_php_style(encoded_sig)
        import hmac
        import hashlib
        data = json.loads(base64_url_decode_php_style(payload).decode('utf-8'))
        algo = data.get('algorithm').upper()
        if algo != 'HMAC-SHA256':
            # only HMAC-SHA256 signatures are supported
            error_format = 'Unknown algorithm we only support HMAC-SHA256 user asked for %s'
            error_message = error_format % algo
            send_warning(error_message)
            logger.error('Unknown algorithm')
            return None
        else:
            # recompute the signature over the (still encoded) payload
            expected_sig = hmac.new(smart_str(secret), msg=smart_str(payload),
                                    digestmod=hashlib.sha256).digest()
            # constant-time comparison to avoid timing side channels
            if not hmac.compare_digest(sig, expected_sig):
                error_format = 'Signature %s didnt match the expected signature %s'
                error_message = error_format % (sig, expected_sig)
                send_warning(error_message)
                return None
            else:
                logger.debug('valid signed request received..')
                return data

    @classmethod
    def get_app_access_token(cls):
        '''
        Get the access_token for the app that can be used for
        insights and creating test users
        application_id = retrieved from the developer page
        application_secret = retrieved from the developer page
        returns the application access_token
        '''
        kwargs = {
            'grant_type': 'client_credentials',
            'client_id': facebook_settings.FACEBOOK_APP_ID,
            'client_secret': facebook_settings.FACEBOOK_APP_SECRET,
        }
        response = cls.request('oauth/access_token', **kwargs)
        return response['access_token']

    @memoized
    @classmethod
    def get_cached_app_access_token(cls):
        '''
        Caches the access token in memory, good for speeding up testing
        '''
        # NOTE(review): ``@memoized`` is applied on top of the classmethod
        # object here; confirm the project's memoized decorator supports
        # wrapping classmethod objects (a plain callable is the usual case).
        app_access_token = cls.get_app_access_token()
        return app_access_token

    @classmethod
    def create_test_user(cls, app_access_token, permissions=None, name=None):
        '''
        Creates a test user with the given permissions and name

        :param app_access_token:
            The application's access token
        :param permissions:
            The list of permissions to request for the test user
        :param name:
            Optionally specify the name
        '''
        if not permissions:
            # NOTE(review): the last default item bundles two permissions in
            # one comma-separated string ('user_photos,offline_access');
            # verify this is intentional.
            permissions = ['read_stream', 'publish_stream',
                           'user_photos,offline_access']
        if isinstance(permissions, list):
            permissions = ','.join(permissions)
        # the requested permissions are encoded into the user's display name
        # so get_or_create_test_user can find a matching user later
        default_name = 'Permissions %s' % permissions.replace(
            ',', ' ').replace('_', '')
        name = name or default_name
        kwargs = {
            'access_token': app_access_token,
            'installed': True,
            'name': name,
            'method': 'post',
            'permissions': permissions,
        }
        path = '%s/accounts/test-users' % facebook_settings.FACEBOOK_APP_ID
        # add the test user data to the test user data class
        test_user_data = cls.request(path, **kwargs)
        test_user_data['name'] = name
        test_user = TestUser(test_user_data)
        return test_user

    @classmethod
    def get_or_create_test_user(cls, app_access_token, name=None, permissions=None, force_create=False):
        '''
        There is no supported way of get or creating a test user
        However
        - creating a test user takes around 5s
        - you an only create 500 test users
        So this slows your testing flow quite a bit.
        This method checks your test users
        Queries their names (stores the permissions in the name)
        '''
        if not permissions:
            permissions = ['read_stream', 'publish_stream', 'publish_actions',
                           'user_photos,offline_access']
        if isinstance(permissions, list):
            permissions = ','.join(permissions)
        # hacking the permissions into the name of the test user
        default_name = 'Permissions %s' % permissions.replace(
            ',', ' ').replace('_', '')
        name = name or default_name
        # retrieve all test users
        test_users = cls.get_test_users(app_access_token)
        user_id_dict = dict([(int(u['id']), u) for u in test_users])
        user_ids = map(str, user_id_dict.keys())
        # use fql to figure out their names
        facebook = OpenFacebook(app_access_token)
        users = facebook.fql('SELECT uid, name FROM user WHERE uid in (%s)' %
                             ','.join(user_ids))
        users_dict = dict([(u['name'], u['uid']) for u in users])
        user_id = users_dict.get(name)
        if force_create and user_id:
            # we need the users access_token, the app access token doesn't
            # always work, seems to be a bug in the Facebook api
            test_user_data = user_id_dict[user_id]
            cls.delete_test_user(test_user_data['access_token'], user_id)
            user_id = None
        if user_id:
            # we found our user, extend the data a bit
            test_user_data = user_id_dict[user_id]
            test_user_data['name'] = name
            test_user = TestUser(test_user_data)
        else:
            # create the user
            test_user = cls.create_test_user(
                app_access_token, permissions, name)
        return test_user

    @classmethod
    def get_test_users(cls, app_access_token):
        # list every test user registered for the app
        kwargs = dict(access_token=app_access_token)
        path = '%s/accounts/test-users' % facebook_settings.FACEBOOK_APP_ID
        # retrieve all test users
        response = cls.request(path, **kwargs)
        test_users = response['data']
        return test_users

    @classmethod
    def delete_test_user(cls, app_access_token, test_user_id):
        # delete a single test user by id
        kwargs = dict(access_token=app_access_token, method='delete')
        path = '%s/' % test_user_id
        response = cls.request(path, **kwargs)
        return response

    @classmethod
    def delete_test_users(cls, app_access_token):
        # remove every test user registered for the app
        test_users = cls.get_test_users(app_access_token)
        test_user_ids = [u['id'] for u in test_users]
        for test_user_id in test_user_ids:
            cls.delete_test_user(app_access_token, test_user_id)
class OpenFacebook(FacebookConnection):
    '''
    The main api class, initialize using

    **Example**::

        graph = OpenFacebook(access_token)
        print(graph.get('me'))
    '''

    def __init__(self, access_token=None, prefetched_data=None,
                 expires=None, current_user_id=None, version=None):
        '''
        :param access_token:
            The facebook Access token
        :param prefetched_data:
            data already obtained from a signed cookie, if any
        :param expires:
            expiry information for the token
        :param current_user_id:
            id of the logged-in user this connection represents, if any
        :param version:
            graph api version string, defaults to 'v1.0'
        '''
        self.access_token = access_token
        # extra data coming from signed cookies
        self.prefetched_data = prefetched_data
        # store to enable detection for offline usage
        self.expires = expires
        # hook to store the current user id if representing the
        # facebook connection to a logged in user :)
        self.current_user_id = current_user_id
        if version is None:
            version = 'v1.0'
        self.version = version

    def __getstate__(self):
        '''
        Turns the object into something easy to serialize
        '''
        # NOTE(review): current_user_id and version are not part of the
        # pickled state (see __setstate__) — confirm this is intended.
        state = dict(
            access_token=self.access_token,
            prefetched_data=self.prefetched_data,
            expires=self.expires,
        )
        return state

    def __setstate__(self, state):
        '''
        Restores the object from the state dict
        '''
        self.access_token = state['access_token']
        self.prefetched_data = state['prefetched_data']
        self.expires = state['expires']

    def is_authenticated(self):
        '''
        Ask facebook if we have access to the users data

        :returns: bool
        '''
        try:
            me = self.me()
        except facebook_exceptions.OpenFacebookException as e:
            # OAuth problems are propagated; any other api error simply
            # means we are not authenticated
            if isinstance(e, facebook_exceptions.OAuthException):
                raise
            me = None
        authenticated = bool(me)
        return authenticated

    def get(self, path, version=None, **kwargs):
        '''
        Make a Facebook API call

        **Example**::

            open_facebook.get('me')
            open_facebook.get('me', fields='id,name')

        :param path:
            The path to use for making the API call
        :param version:
            optional graph api version, defaults to the instance version
        :returns: dict
        '''
        version = version or self.version
        kwargs['version'] = version
        response = self.request(path, **kwargs)
        return response

    def get_many(self, *ids, **kwargs):
        '''
        Make a batched Facebook API call
        For multiple ids

        **Example**::

            open_facebook.get('me', 'starbucks')
            open_facebook.get('me', 'starbucks', fields='id,name')

        :param ids:
            The object ids to fetch in one request
        :returns: dict
        '''
        kwargs['ids'] = ','.join(ids)
        return self.request(**kwargs)

    def set(self, path, params=None, version=None, **post_data):
        '''
        Write data to facebook

        **Example**::

            open_facebook.set('me/feed', message='testing open facebook')

        :param path:
            The path to use for making the API call
        :param params:
            A dictionary of get params
        :param post_data:
            The kwargs for posting to facebook
        :returns: dict
        '''
        version = version or self.version
        assert self.access_token, 'Write operations require an access token'
        if not params:
            params = {}
        params['method'] = 'post'
        params['version'] = version
        response = self.request(path, post_data=post_data, **params)
        return response

    def delete(self, path, *args, **kwargs):
        '''
        Delete the given bit of data

        **Example**::

            graph.delete(12345)

        :param path:
            the id of the element to remove
        '''
        kwargs['method'] = 'delete'
        self.request(path, *args, **kwargs)

    def fql(self, query, **kwargs):
        '''
        Runs the specified query against the Facebook FQL API.

        **Example**::

            open_facebook.fql('SELECT name FROM user WHERE uid = me()')

        :param query:
            The query to execute
        :param kwargs:
            Extra options to send to facebook
        :returns: dict
        '''
        kwargs['q'] = query
        path = 'fql'
        response = self.request(path, **kwargs)
        # return only the data for backward compatability
        return response['data']

    def batch_fql(self, queries_dict):
        '''
        queries_dict a dict with the required queries
        returns the query results in:

        **Example**::

            response = facebook.batch_fql({
                name: 'SELECT uid, name, pic_square FROM user WHERE uid = me()',
                rsvp: 'SELECT uid, rsvp_status FROM event_member WHERE eid=12345678',
            })

            # accessing the results
            response['fql_results']['name']
            response['fql_results']['rsvp']

        :param queries_dict:
            A dictiontary of queries to execute
        :returns: dict
        '''
        # facebook accepts a json-encoded dict of named queries for batch fql
        query = json.dumps(queries_dict)
        query_results = self.fql(query)
        named_results = dict(
            [(r['name'], r['fql_result_set']) for r in query_results])
        return named_results

    def me(self):
        '''
        Cached method of requesting information about me
        '''
        # cached on the instance after the first call
        me = getattr(self, '_me', None)
        if me is None:
            self._me = me = self.get('me')
        return me

    def permissions(self):
        '''
        Shortcut for self.get('me/permissions') with some extra parsing
        to turn it into a dictionary of booleans

        :returns: dict
        '''
        permissions_dict = {}
        try:
            permissions = {}
            permissions_response = self.get('me/permissions')
            # determine whether we're dealing with 1.0 or 2.0+
            for permission in permissions_response.get('data', []):
                # graph api 2.0+, returns multiple dicts with keys 'status' and
                # 'permission'
                if any(value in ['granted', 'declined'] for value in permission.values()):
                    for perm in permissions_response['data']:
                        grant = perm.get('status') == 'granted'
                        name = perm.get('permission')
                        # just in case something goes sideways
                        if grant and name:
                            permissions_dict[name] = grant
                # graph api 1.0, returns single dict as {permission: intval}
                elif any(value in [0, 1, '0', '1'] for value in permission.values()):
                    permissions = permissions_response['data'][0]
                    permissions_dict = dict([(k, bool(int(v)))
                                             for k, v in permissions.items()
                                             if v == '1' or v == 1])
                # one pass is enough: both branches consume the full list
                break
        except facebook_exceptions.OAuthException:
            # no valid token means no permissions; return the empty dict
            pass
        return permissions_dict

    def has_permissions(self, required_permissions):
        '''
        Validate if all the required_permissions are currently given
        by the user

        **Example**::

            open_facebook.has_permissions(['publish_actions','read_stream'])

        :param required_permissions:
            A list of required permissions
        :returns: bool
        '''
        permissions_dict = self.permissions()
        # see if we have all permissions
        has_permissions = True
        for permission in required_permissions:
            if permission not in permissions_dict:
                has_permissions = False
        return has_permissions

    def my_image_url(self, size='large'):
        '''
        Returns the image url from your profile
        Shortcut for me/picture

        :param size:
            the type of the image to request, see facebook for available formats
        :returns: string
        '''
        query_dict = QueryDict('', True)
        query_dict['type'] = size
        query_dict['access_token'] = self.access_token
        url = '%sme/picture?%s' % (self.api_url, query_dict.urlencode())
        return url

    def request(self, path='', post_data=None, old_api=False, version=None, **params):
        # build the full url and delegate to the shared _request machinery
        url = self.get_request_url(path=path, old_api=old_api, version=version,
                                   **params)
        logger.info('requesting url %s', url)
        response = self._request(url, post_data)
        return response

    def get_request_url(self, path='', old_api=False, version=None, **params):
        '''
        Gets the url for the request.
        '''
        api_base_url = self.old_api_url if old_api else self.api_url
        version = version or self.version
        if getattr(self, 'access_token', None):
            params['access_token'] = self.access_token
        # normalize the slashes so the join below produces a clean url
        if api_base_url.endswith('/'):
            api_base_url = api_base_url[:-1]
        if path and path.startswith('/'):
            path = path[1:]
        url = '/'.join([api_base_url, version, path])
        return '%s?%s' % (url, urlencode(params))
class TestUser(object):
    '''
    Simple wrapper around test users
    '''

    def __init__(self, data):
        # keep the raw payload around and expose the commonly used fields
        self.data = data
        self.name = data['name']
        self.id = data['id']
        self.access_token = data['access_token']

    def graph(self):
        # an OpenFacebook connection authenticated as this test user
        return OpenFacebook(self.access_token)

    def __repr__(self):
        return 'Test user %s' % self.name
| javipalanca/Django-facebook | open_facebook/api.py | Python | bsd-3-clause | 32,746 |
# This file is part of Virtual Programming Lab.
#
# Virtual Programming Lab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Virtual Programming Lab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Virtual Programming Lab. If not, see <http://www.gnu.org/licenses/>.
import os.path

# Absolute path of the directory containing this settings module; used to
# build all project-relative paths below.
PROJECT_HOME = os.path.abspath(os.path.dirname(__file__))

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.',  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '',  # Or path to database file if using sqlite3.
        'USER': '',  # Not used with sqlite3.
        'PASSWORD': '',  # Not used with sqlite3.
        'HOST': '',  # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',  # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Edmonton'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = PROJECT_HOME + '/static/'

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static/'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to source control; rotate it and move
# it into localsettings for any non-development deployment.
SECRET_KEY = '-*8n--6mno1f(@^pasf%q1@^d_*san0+y%(7up4fqpv-)27d1j'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'vpl_profile.last_request_middleware.LastRequestMiddleware',
)

ROOT_URLCONF = 'urls'

TEMPLATE_DIRS = (
    # bug fix: the original lacked the trailing comma, which made
    # TEMPLATE_DIRS a plain string instead of a one-element tuple; Django
    # then iterates the string character by character when looking for
    # template directories.
    PROJECT_HOME + "/templates/",
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'vpl_profile',
    'classlist',
    'project',
    'ide',
    'chat',
    'lab',
    'registration',
    'south',
)

LOGIN_REDIRECT_URL = "/ide/"

AUTH_PROFILE_MODULE = "vpl_profile.UserProfile"

# Directory where student project files are stored on disk.
STUDENT_PROJECT_FILES = PROJECT_HOME + "/project_files/"

# Machine-specific overrides; must stay last so it can override the above.
from localsettings import *
| buchuki/programming_lab | programming_lab/settings.py | Python | gpl-3.0 | 4,205 |
import pandas as pd
# Get group non-specific dfs:
transplants_raw = pd.read_csv('metadata-transplants.txt', sep='\t', index_col=False)
for group in ['DA', 'DB']:
# Get group specific dfs:
colo_df = pd.read_csv(f'colonized-{group}.txt', sep='\t', index_col=None)
no_colo_df = pd.read_csv(f'did-not-colonize-{group}.txt', sep='\t', index_col=None)
transplants = transplants_raw.loc[transplants_raw['Group'] == group]
countries_summary = pd.read_csv(f'detection-global-by-country-{group}.txt', sep='\t', index_col=0)
coverage_raw = pd.read_csv(f'mean-cov-{group}.txt', sep='\t', index_col=None)
coverage = coverage_raw.melt(id_vars=['bins'])
# Combine colo and no_colo data together:
colo_df['outcome'] = 'colonization'
no_colo_df['outcome'] = 'no_colonization'
outcomes = pd.concat([no_colo_df, colo_df])
# Merge dfs together:
merge1 = pd.merge(outcomes, transplants[['Recipient', 'Sample Name', 'FMT Method']], left_on='recipient', right_on='Recipient', how='left')
merge1.drop('Recipient', axis=1, inplace=True)
merge1.rename(columns={'Sample Name':'transplant_sample', 'FMT Method':'fmt_method'}, inplace=True)
merge2 = pd.merge(merge1, coverage, left_on=['MAG', 'transplant_sample'], right_on=['bins', 'variable'], how='left')
merge2.rename(columns={'value':'transplant_mean_cov_Q2Q3'}, inplace=True)
merge2.drop(columns=['variable', 'bins'], inplace=True)
merge3 = pd.merge(merge2, countries_summary, left_on='MAG', right_on='bins', how='left')
# Save:
merge3.to_csv(f'summary-for-regression-{group}.txt', sep='\t', index=False)
| merenlab/web | data/fmt-gut-colonization/files/make-summary-tables-for-regression.py | Python | mit | 1,622 |
# Landsat Util
# License: CC0 1.0 Universal
"""Tests for mixins"""
import sys
import unittest
from cStringIO import StringIO
from contextlib import contextmanager
from landsat.mixins import VerbosityMixin
# Capture function is taken from
# http://schinckel.net/2013/04/15/capture-and-test-sys.stdout-sys.stderr-in-unittest.testcase/
@contextmanager
def capture(command, *args, **kwargs):
    '''
    Run ``command(*args, **kwargs)`` with ``sys.stdout`` replaced by an
    in-memory buffer and yield everything it printed as a string.
    '''
    out, sys.stdout = sys.stdout, StringIO()
    try:
        command(*args, **kwargs)
        sys.stdout.seek(0)
        yield sys.stdout.read()
    finally:
        # bug fix: restore the real stdout even when the command (or the
        # with-block) raises; the original left the replacement in place.
        sys.stdout = out
class TestMixins(unittest.TestCase):
    """Exercise VerbosityMixin's console-output helpers by capturing stdout.

    The expected strings contain raw ANSI color escape sequences
    (e.g. ``\\x1b[34m`` blue, ``\\x1b[31m`` red, ``\\x1b[0m`` reset).
    """

    @classmethod
    def setUpClass(cls):
        # A single shared mixin instance is sufficient for all tests.
        cls.v = VerbosityMixin()

    def test_output(self):
        # just a value: nothing is printed without normal=True
        with capture(self.v.output, 'this is a test') as output:
            self.assertEquals("", output)
        # value as normal
        with capture(self.v.output, 'this is a test', normal=True) as output:
            self.assertEquals("this is a test\n", output)
        # value as normal with color
        with capture(self.v.output, 'this is a test', normal=True, color='blue') as output:
            self.assertEquals("\x1b[34mthis is a test\x1b[0m\n", output)
        # value as error (rendered in red)
        with capture(self.v.output, 'this is a test', normal=True, error=True) as output:
            self.assertEquals("\x1b[31mthis is a test\x1b[0m\n", output)
        # value with arrow prefix
        with capture(self.v.output, 'this is a test', normal=True, arrow=True) as output:
            self.assertEquals("\x1b[34m===> \x1b[0mthis is a test\n", output)
        # value with indent
        with capture(self.v.output, 'this is a test', normal=True, indent=1) as output:
            self.assertEquals("     this is a test\n", output)

    def test_exit(self):
        # exit() must terminate with SystemExit after printing the message
        with self.assertRaises(SystemExit):
            with capture(self.v.exit, 'exit test') as output:
                self.assertEquals('exit test', output)

    def test_print(self):
        # message in blue with arrow
        with capture(self.v._print, msg='this is a test', color='blue', arrow=True) as output:
            self.assertEquals("\x1b[34m===> \x1b[0m\x1b[34mthis is a test\x1b[0m\n", output)
        # just a message
        with capture(self.v._print, msg='this is a test') as output:
            self.assertEquals("this is a test\n", output)
        # message with color and indent
        with capture(self.v._print, msg='this is a test', color='blue', indent=1) as output:
            self.assertEquals("     \x1b[34mthis is a test\x1b[0m\n", output)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| simonemurzilli/landsat-util | tests/test_mixins.py | Python | cc0-1.0 | 2,596 |
from inspect import signature
from collections import OrderedDict
class Match(OrderedDict):
    """An ordered predicate -> handler dispatch table.

    Keys are guard callables, values are handlers.  Calling the Match
    object tries each guard in insertion order and invokes the handler
    of the first guard that returns a truthy value.
    """

    @staticmethod
    def _call(func, *args, **kwds):
        # Handlers that declare no parameters are invoked without arguments.
        if not signature(func).parameters:
            return func()
        return func(*args, **kwds)

    @staticmethod
    def _guard(case, *args, **kwds):
        # A guard that raises (for any reason) is treated as a non-match.
        try:
            return case(*args, **kwds)
        except:
            return False

    def __call__(self, *args, **kwds):
        for predicate in self:
            if Match._guard(predicate, *args, **kwds):
                return Match._call(self[predicate], *args, **kwds)
        raise IndexError('match case out of range')
| thefarwind/pymatch | match.py | Python | mit | 596 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from abel.tools.polar import reproject_image_into_polar
from scipy.ndimage import map_coordinates
from scipy.ndimage.interpolation import shift
from scipy.optimize import curve_fit
def angular_integration(IM, origin=None, Jacobian=True, dr=1, dt=None):
    """Angular integration of the image.

    Returns the one-dimensional intensity profile as a function of the
    radial coordinate.

    Note: the use of Jacobian=True applies the correct Jacobian for the
    integration of a 3D object in spherical coordinates.

    Parameters
    ----------
    IM : 2D numpy.array
        The data image.
    origin : tuple
        Image center coordinate relative to *bottom-left* corner
        defaults to ``rows//2+rows%2,cols//2+cols%2``.
    Jacobian : boolean
        Include :math:`r\sin\\theta` in the angular sum (integration).
        Also, ``Jacobian=True`` is passed to
        :func:`abel.tools.polar.reproject_image_into_polar`,
        which includes another value of ``r``, thus providing the appropriate
        total Jacobian of :math:`r^2\sin\\theta`.
    dr : float
        Radial coordinate grid spacing, in pixels (default 1). `dr=0.5` may
        reduce pixel granularity of the speed profile.
    dt : float
        Theta coordinate grid spacing in degrees.
        if ``dt=None``, dt will be set such that the number of theta values
        is equal to the height of the image (which should typically ensure
        good sampling.)

    Returns
    -------
    r : 1D numpy.array
        radial coordinates
    speeds : 1D numpy.array
        Integrated intensity array (vs radius).
    """
    # resample the image onto an (r, theta) grid
    polarIM, R, T = reproject_image_into_polar(
        IM, origin, Jacobian=Jacobian, dr=dr, dt=dt)

    dt = T[0, 1] - T[0, 0]  # angular grid spacing, for the trapezoid rule

    if Jacobian:  # x r sinθ
        polarIM = polarIM * R * np.abs(np.sin(T))

    # integrate over theta (axis=1) for every radius
    speeds = np.trapz(polarIM, axis=1, dx=dt)

    n = speeds.shape[0]

    return R[:n, 0], speeds  # limit radial coordinates range to match speed
def average_radial_intensity(IM, **kwargs):
    """Calculate the average radial intensity of the image, averaged over all
    angles. This differs from :func:`abel.tools.vmi.angular_integration` only
    in that it returns the average intensity, and not the integrated intensity
    of a 3D image. It is equivalent to calling
    :func:`abel.tools.vmi.angular_integration` with
    `Jacobian=False` and then dividing the result by 2*pi.

    Parameters
    ----------
    IM : 2D numpy.array
        The data image.
    kwargs :
        additional keyword arguments to be passed to
        :func:`abel.tools.vmi.angular_integration`

    Returns
    -------
    r : 1D numpy.array
        radial coordinates
    intensity : 1D numpy.array
        one-dimensional intensity profile as a function of the radial coordinate.
    """
    # integrate over theta without the spherical Jacobian, then normalize
    # by the full angular range to obtain the mean
    R, intensity = angular_integration(IM, Jacobian=False, **kwargs)
    intensity /= 2 * np.pi
    return R, intensity
def radial_integration(IM, radial_ranges=None):
    """ Intensity variation in the angular coordinate.

    This function is the :math:`\\theta`-coordinate complement to
    :func:`abel.tools.vmi.angular_integration`

    Evaluates intensity vs angle for defined radial ranges.
    Determines the anisotropy parameter for each radial range.

    See :doc:`examples/example_PAD.py <examples>`

    Parameters
    ----------
    IM : 2D numpy.array
        Image data
    radial_ranges : list of tuple ranges or int step
        tuple integration ranges
        ``[(r0, r1), (r2, r3), ...]``
        evaluates the intensity vs angle
        for the radial ranges ``r0_r1``, ``r2_r3``, etc.
        int - the whole radial range ``(0, step), (step, 2*step), ..``

    Returns
    -------
    Beta : array of tuples
        (beta0, error_beta_fit0), (beta1, error_beta_fit1), ...
        corresponding to the radial ranges
    Amplitude : array of tuples
        (amp0, error_amp_fit0), (amp1, error_amp_fit1), ...
        corresponding to the radial ranges
    Rmidpt : numpy float 1d array
        radial-mid point of each radial range
    Intensity_vs_theta: 2D numpy.array
        Intensity vs angle distribution for each selected radial range.
    theta: 1D numpy.array
        Angle coordinates, referenced to vertical direction.
    """
    polarIM, r_grid, theta_grid = reproject_image_into_polar(IM)

    theta = theta_grid[0, :]  # theta coordinates
    r = r_grid[:, 0]  # radial coordinates

    if radial_ranges is None:
        # default: cover the whole radial range in 1-pixel steps
        radial_ranges = 1
    if isinstance(radial_ranges, int):
        rr = np.arange(0, r[-1], radial_ranges)
        # @DanHickstein clever code to map ranges
        radial_ranges = list(zip(rr[:-1], rr[1:]))

    Intensity_vs_theta = []
    radial_midpt = []
    Beta = []
    Amp = []
    for rr in radial_ranges:
        # boolean mask selecting the rows of polarIM inside this range
        subr = np.logical_and(r >= rr[0], r <= rr[1])

        # sum intensity across radius of spectral feature
        intensity_vs_theta_at_R = np.sum(polarIM[subr], axis=0)
        Intensity_vs_theta.append(intensity_vs_theta_at_R)

        radial_midpt.append(np.mean(rr))

        # fit the angular distribution to extract the anisotropy parameter
        beta, amp = anisotropy_parameter(theta, intensity_vs_theta_at_R)
        Beta.append(beta)
        Amp.append(amp)

    return Beta, Amp, radial_midpt, Intensity_vs_theta, theta
def anisotropy_parameter(theta, intensity, theta_ranges=None):
    """
    Evaluate anisotropy parameter :math:`\\beta`, for :math:`I` vs :math:`\\theta` data.

    .. math::

        I = \\frac{\\sigma_\\text{total}}{4\\pi} [ 1 + \\beta P_2(\\cos\\theta) ]

    where :math:`P_2(x)=\\frac{3x^2-1}{2}` is a 2nd order Legendre polynomial.

    `Cooper and Zare "Angular distribution of photoelectrons"
    J Chem Phys 48, 942-943 (1968) <http://dx.doi.org/10.1063/1.1668742>`_

    Parameters
    ----------
    theta: 1D numpy array
        Angle coordinates, referenced to the vertical direction.
    intensity: 1D numpy array
        Intensity variation with angle
    theta_ranges: list of tuples
        Angular ranges over which to fit ``[(theta1, theta2), (theta3, theta4)]``.
        Allows data to be excluded from fit, default include all data

    Returns
    -------
    beta : tuple of floats
        (anisotropy parameter, fit error); (nan, nan) when the fit fails
        or produces an unphysical value
    amplitude : tuple of floats
        (amplitude of signal, fit error)
    """
    def P2(x):
        # 2nd order Legendre polynomial
        return (3 * x * x - 1) / 2

    def PAD(theta, beta, amplitude):
        # photoelectron angular distribution, Eq. (1) from Cooper & Zare
        return amplitude * (1 + beta * P2(np.cos(theta)))

    # restrict the angular range of data included in the fit, if requested
    if theta_ranges is not None:
        subtheta = np.ones(len(theta), dtype=bool)
        for rt in theta_ranges:
            subtheta = np.logical_and(
                subtheta, np.logical_and(theta >= rt[0], theta <= rt[1]))
        theta = theta[subtheta]
        intensity = intensity[subtheta]

    # fit angular intensity distribution
    try:
        popt, pcov = curve_fit(PAD, theta, intensity)
        beta, amplitude = popt
        error_beta, error_amplitude = np.sqrt(np.diag(pcov))
        # discard unphysical results (the valid range is -1 <= beta <= 2)
        if beta > 2 or beta < -1:
            beta, error_beta = np.nan, np.nan
    except Exception:
        # bug fix: this was a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit; a failed fit simply reports NaNs
        beta, error_beta = np.nan, np.nan
        amplitude, error_amplitude = np.nan, np.nan

    return (beta, error_beta), (amplitude, error_amplitude)
| rth/PyAbel | abel/tools/vmi.py | Python | mit | 7,503 |
import pytest
from GEMEditor.model.classes.base import EvidenceLink, BaseTreeElement
from GEMEditor.model.classes.evidence import Evidence
class TestBaseEvidenceElement:
    """Tests for EvidenceLink, the evidence-holding mixin."""

    @pytest.fixture(autouse=True)
    def setup_class(self):
        # fresh link and evidence objects for every test
        self.instance = EvidenceLink()
        self.evidence = Evidence()

    def test_setup(self):
        # a new link starts with an empty evidence set
        assert isinstance(self.instance.evidences, set)
        assert len(self.instance.evidences) == 0

    def test_add_evidence(self):
        self.instance.add_evidence(self.evidence)
        assert self.evidence in self.instance.evidences
        assert len(self.instance.evidences) == 1

    def test_remove_evidence(self):
        self.instance.add_evidence(self.evidence)
        assert self.evidence in self.instance.evidences
        # removing the evidence leaves the set empty again
        self.instance.remove_evidence(self.evidence)
        assert self.evidence not in self.instance.evidences
        assert len(self.instance.evidences) == 0
class TestBaseTreeElement:
    """Tests for the parent/child bookkeeping of BaseTreeElement.

    The linkage is stored with duplicates allowed (count-based), which the
    removal tests below exercise explicitly.
    """

    def test_add_child(self):
        parent = BaseTreeElement()
        child = BaseTreeElement()
        parent.add_child(child)

        # Check that the items are properly linked
        assert child in parent._children
        assert parent in child._parents

        # Check that genes returns empty as there is no child returning itself
        assert not parent.genes
        assert not child.reactions

    def test_add_parent(self):
        parent = BaseTreeElement()
        child = BaseTreeElement()
        child.add_parent(parent)

        # Check that the items are properly linked
        assert child in parent._children
        assert parent in child._parents

        # Check that genes returns empty as there is no child returning itself
        assert not parent.genes
        assert not child.reactions

    def test_removal_parent(self):
        parent = BaseTreeElement()
        child = BaseTreeElement()
        # adding the same parent twice stores two link entries
        child.add_parent(parent)
        child.add_parent(parent)
        assert child._parents.count(parent) == 2
        assert parent._children.count(child) == 2

        # Remove only one entry
        child.remove_parent(parent)
        assert child._parents.count(parent) == 1
        assert parent._children.count(child) == 1

        # Readd parent
        child.add_parent(parent)
        assert child._parents.count(parent) == 2
        assert parent._children.count(child) == 2

        # Remove all entries for parent1
        child.remove_parent(parent, all=True)
        assert child._parents.count(parent) == 0
        assert parent._children.count(child) == 0

    def test_remove_parent2(self):
        # all=True on a single link also clears the relationship
        parent = BaseTreeElement()
        child = BaseTreeElement()
        child.add_parent(parent)
        child.remove_parent(parent, all=True)
        assert not child._parents

    def test_removal_child(self):
        parent = BaseTreeElement()
        child = BaseTreeElement()
        # adding the same child twice stores two link entries
        parent.add_child(child)
        parent.add_child(child)
        assert child._parents.count(parent) == 2
        assert parent._children.count(child) == 2

        # Remove only one entry
        parent.remove_child(child)
        assert child._parents.count(parent) == 1
        assert parent._children.count(child) == 1

        # Readd child
        parent.add_child(child)
        assert child._parents.count(parent) == 2
        assert parent._children.count(child) == 2

        # Remove all entries for child1
        parent.remove_child(child, all=True)
        assert child._parents.count(parent) == 0
        assert parent._children.count(child) == 0
| JuBra/GEMEditor | GEMEditor/model/classes/test/test_base.py | Python | gpl-3.0 | 3,570 |
__author__ = 'Bohdan Mushkevych'
import logging
from datetime import datetime
from synergy.db.model.log_recording import LogRecording
from synergy.db.dao.log_recording_dao import LogRecordingDao
class LogRecordingHandler(logging.Handler):
    """logging.Handler that persists every emitted record to the database
    via LogRecordingDao, keyed by the id of a parent object."""

    def __init__(self, logger, parent_object_id):
        super(LogRecordingHandler, self).__init__()
        self.logger = logger
        # id of the object whose log this handler records
        self.parent_object_id = parent_object_id
        self.log_recording_dao = LogRecordingDao(logger)

    def attach(self):
        """ method clears existing log_recorder entries for given parent_object_id,
            creates a new one and attaches this handler to the logger
            from this moment every log record will be recorded in the DB """
        log_recording = LogRecording(parent_object_id=self.parent_object_id, created_at=datetime.utcnow())
        self.log_recording_dao.remove(self.parent_object_id)
        self.log_recording_dao.update(log_recording)

        formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
                                      datefmt='%Y-%m-%d %H:%M:%S')
        self.setFormatter(formatter)
        self.logger.addHandler(self)

    def detach(self):
        """ method detaches this handler from the logger """
        self.logger.removeHandler(self)

    def emit(self, record):
        # Append the formatted record to the stored log; on any DB failure
        # detach *before* logging the error so the error record below does
        # not recurse back into this emit().
        msg = self.format(record)
        try:
            self.log_recording_dao.append_log(self.parent_object_id, msg.rstrip())
        except Exception as e:
            self.detach()
            self.logger.error(f'Detached LogRecordingHandler. Exception on LogRecordingDao.append_log: {e}',
                              exc_info=True)
| mushkevych/scheduler | synergy/system/log_recording_handler.py | Python | bsd-3-clause | 1,676 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_systemconfiguration
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of SystemConfiguration Avi RESTful Object
description:
- This module is used to configure SystemConfiguration object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
admin_auth_configuration:
description:
- Adminauthconfiguration settings for systemconfiguration.
dns_configuration:
description:
- Dnsconfiguration settings for systemconfiguration.
dns_virtualservice_refs:
description:
- Dns virtualservices hosting fqdn records for applications across avi vantage.
- If no virtualservices are provided, avi vantage will provide dns services for configured applications.
- Switching back to avi vantage from dns virtualservices is not allowed.
- It is a reference to an object of type virtualservice.
docker_mode:
description:
- Boolean flag to set docker_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
email_configuration:
description:
- Emailconfiguration settings for systemconfiguration.
global_tenant_config:
description:
- Tenantconfiguration settings for systemconfiguration.
linux_configuration:
description:
- Linuxconfiguration settings for systemconfiguration.
mgmt_ip_access_control:
description:
- Configure ip access control for controller to restrict open access.
ntp_configuration:
description:
- Ntpconfiguration settings for systemconfiguration.
portal_configuration:
description:
- Portalconfiguration settings for systemconfiguration.
proxy_configuration:
description:
- Proxyconfiguration settings for systemconfiguration.
snmp_configuration:
description:
- Snmpconfiguration settings for systemconfiguration.
ssh_ciphers:
description:
- Allowed ciphers list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default ciphers are allowed.
- Ssh -q cipher provides the list of default ciphers supported.
ssh_hmacs:
description:
- Allowed hmac list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default hmacs are allowed.
- Ssh -q mac provides the list of default hmacs supported.
tech_support_uploader_configuration:
description:
- Techsupportuploaderconfiguration settings for systemconfiguration.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SystemConfiguration object
avi_systemconfiguration:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_systemconfiguration
"""
RETURN = '''
obj:
description: SystemConfiguration (api/systemconfiguration) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point for the SystemConfiguration module.

    Declares the module's argument spec (one entry per API field, plus the
    common Avi connection arguments) and delegates all create/update/delete
    handling to the generic ``avi_ansible_api`` helper.
    """
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        admin_auth_configuration=dict(type='dict',),
        dns_configuration=dict(type='dict',),
        dns_virtualservice_refs=dict(type='list',),
        docker_mode=dict(type='bool',),
        email_configuration=dict(type='dict',),
        global_tenant_config=dict(type='dict',),
        linux_configuration=dict(type='dict',),
        mgmt_ip_access_control=dict(type='dict',),
        ntp_configuration=dict(type='dict',),
        portal_configuration=dict(type='dict',),
        proxy_configuration=dict(type='dict',),
        snmp_configuration=dict(type='dict',),
        ssh_ciphers=dict(type='list',),
        ssh_hmacs=dict(type='list',),
        tech_support_uploader_configuration=dict(type='dict',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail fast when the Avi SDK (guarded import at module top) is absent.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'systemconfiguration',
                           set([]))


if __name__ == '__main__':
    main()
| halberom/ansible | lib/ansible/modules/network/avi/avi_systemconfiguration.py | Python | gpl-3.0 | 6,164 |
# -*- coding: utf-8 -*-
from django import forms
from location.models import Address
class AddressAdminForm(forms.ModelForm):
    """Admin form for Address that enlarges the description textarea."""

    class Meta:
        model = Address
        # A ModelForm without ``fields``/``exclude`` raises
        # ImproperlyConfigured on Django >= 1.8; '__all__' preserves the
        # original behavior of exposing every model field.
        fields = '__all__'
        widgets = {
            'description': forms.Textarea(attrs={'cols': 80, 'rows': 20}),
        }
| thoreg/raus-mit-den-kids | rmdk/location/forms.py | Python | mit | 275 |
from decimal import *
import datetime
from operator import attrgetter
from django.forms.formsets import formset_factory
from django.contrib.sites.models import Site
from models import *
from forms import *
try:
from notification import models as notification
except ImportError:
notification = None
def is_number(s):
    """Return True if *s* parses as a float, False on a ValueError."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def unpaid_orders():
    """Return orders that are past the submitted state but not fully paid.

    The state filter matches the 'ubmitted' suffix, so both 'Submitted'
    and 'submitted' (and e.g. 'Resubmitted') are excluded.
    """
    candidates = Order.objects.exclude(state__contains="ubmitted")
    return [order for order in candidates if not order.is_fully_paid()]
class SalesRow(object):
    """One aggregated sales-table row for a (product, customer) pair."""

    def __init__(self, product, customer, quantity, extended_price):
        # Display names plus running totals accumulated by sales_table().
        self.product, self.customer = product, customer
        self.quantity, self.extended_price = quantity, extended_price
def sales_table(from_date, to_date):
    """Aggregate order items delivered within [from_date, to_date] into one
    SalesRow per (product, customer), sorted by product then customer."""
    items = OrderItem.objects.filter(
        order__delivery_date__range=(from_date, to_date))
    totals = {}
    for item in items:
        key = "-".join([str(item.product.id), str(item.order.customer.id)])
        row = totals.get(key)
        if row is None:
            row = SalesRow(item.product.long_name,
                           item.order.customer.long_name,
                           Decimal("0"), Decimal("0"))
            totals[key] = row
        row.quantity += item.quantity
        row.extended_price += item.extended_price()
    return sorted(totals.values(), key=attrgetter('product', 'customer'))
def weekly_production_plans(week_date):
    """Return producer ProductPlans covering the week of *week_date*,
    sorted by product category then product name.

    A plan qualifies when it starts on or before *week_date* and runs at
    least through Saturday of that week.  Each plan is annotated in place
    with ``category`` and ``product_name`` attributes for template use.
    """
    monday = week_date - datetime.timedelta(days=datetime.date.weekday(week_date))
    saturday = monday + datetime.timedelta(days=5)
    plans = ProductPlan.objects.select_related(depth=1).filter(
        role="producer",
        from_date__lte=week_date,
        to_date__gte=saturday)
    for plan in plans:
        # parent_string() walks the product hierarchy to build the category.
        plan.category = plan.product.parent_string()
        plan.product_name = plan.product.short_name
    plans = sorted(plans, key=attrgetter('category',
        'product_name'))
    return plans
def plan_columns(from_date, to_date):
    """Return one 'YYYY-MM-DD' label per week from *from_date* through
    *to_date* inclusive, stepping seven days at a time."""
    week = datetime.timedelta(days=7)
    labels = []
    current = from_date
    while current <= to_date:
        labels.append(current.strftime('%Y-%m-%d'))
        current += week
    return labels
def sd_columns(from_date, to_date):
    """Return one 'YYYY_MM_DD' label (underscore-separated, suitable for
    dict keys) per week from *from_date* through *to_date* inclusive."""
    week = datetime.timedelta(days=7)
    labels = []
    current = from_date
    while current <= to_date:
        labels.append(current.strftime('%Y_%m_%d'))
        current += week
    return labels
# shd plan_weeks go to the view and include headings?
# somebody needs headings!
def create_weekly_plan_forms(rows, data=None):
    """Build one PlanRowForm per product row, each carrying a formset of
    weekly PlanCellForms.

    ``rows`` is expected in the shape produced by ``plan_weeks``: element 0
    is the product, the remaining elements are weekly cells with ``plan``,
    ``product``, ``from_date``, ``to_date`` and ``quantity`` attributes.

    :param rows: iterable of [product, cell, cell, ...] rows
    :param data: optional POST data used to bind the forms and formsets
    :return: list of PlanRowForms, each with a ``formset`` attribute attached
    """
    form_list = []
    PlanCellFormSet = formset_factory(PlanCellForm, extra=0)
    for row in rows:
        product = row[0]
        row_form = PlanRowForm(data, prefix=product.id, initial={'product_id': product.id})
        row_form.product = product.long_name
        cells = row[1:len(row)]
        initial_data = []
        for cell in cells:
            # An empty plan_id marks a cell with no persisted plan yet.
            plan_id = ""
            if cell.plan:
                plan_id = cell.plan.id
            # Renamed from ``dict`` so the builtin is no longer shadowed.
            cell_initial = {
                'plan_id': plan_id,
                'product_id': cell.product.id,
                'from_date': cell.from_date,
                'to_date': cell.to_date,
                'quantity': cell.quantity,
            }
            initial_data.append(cell_initial)
        row_form.formset = PlanCellFormSet(data, prefix=product.id, initial=initial_data)
        form_list.append(row_form)
    return form_list
class SupplyDemandTable(object):
    """Simple container pairing a list of column headers with data rows."""

    def __init__(self, columns, rows):
        self.columns, self.rows = columns, rows
def supply_demand_table(from_date, to_date, member=None):
    """Build a week-by-week supply/demand balance per product.

    Producer plans add to the weekly balance, consumer plans subtract.
    Non-inventoried ProducerProducts with a default available quantity
    contribute a constant baseline supply each week.

    :param from_date: first week start date (inclusive)
    :param to_date: last week start date (inclusive)
    :param member: optional party; restrict plans to that member
    :return: SupplyDemandTable; each row is [product, qty, qty, ...]

    NOTE(review): uses Python 2 ``list.sort(cmp)``/``dict.values()`` idioms.
    """
    plans = ProductPlan.objects.all()
    cps = ProducerProduct.objects.filter(
        inventoried=False,
        default_avail_qty__gt=0,
    )
    # Sum baseline (non-inventoried) availability per product.
    constants = {}
    for cp in cps:
        constants.setdefault(cp.product, Decimal("0"))
        constants[cp.product] += cp.default_avail_qty
    if member:
        plans = plans.filter(member=member)
    rows = {}
    for plan in plans:
        wkdate = from_date
        product = plan.product.supply_demand_product()
        constant = Decimal('0')
        cp = constants.get(product)
        if cp:
            constant = cp
        # Seed every week of the row with the baseline quantity.
        row = []
        while wkdate <= to_date:
            row.append(constant)
            wkdate = wkdate + datetime.timedelta(days=7)
        row.insert(0, product)
        rows.setdefault(product, row)
        wkdate = from_date
        week = 0
        # Apply the plan to each week it covers (+supply / -demand).
        while wkdate <= to_date:
            if plan.from_date <= wkdate and plan.to_date >= wkdate:
                if plan.role == "producer":
                    rows[product][week + 1] += plan.quantity
                else:
                    rows[product][week + 1] -= plan.quantity
            wkdate = wkdate + datetime.timedelta(days=7)
            week += 1
    label = "Product/Weeks"
    columns = [label]
    wkdate = from_date
    while wkdate <= to_date:
        columns.append(wkdate)
        wkdate = wkdate + datetime.timedelta(days=7)
    rows = rows.values()
    rows.sort(lambda x, y: cmp(x[0].short_name, y[0].short_name))
    sdtable = SupplyDemandTable(columns, rows)
    return sdtable
def supply_demand_rows(from_date, to_date, member=None):
    """Dojo-grid variant of :func:`supply_demand_table`.

    Returns a list of dicts, one per product, keyed by ``product``, ``id``
    and one 'YYYY_MM_DD' key per week whose value is the net quantity as a
    string (producer plans add, consumer plans subtract; non-inventoried
    producer products seed a constant weekly baseline).

    NOTE(review): week values are round-tripped through ``str``/``Decimal``
    so the grid receives strings; uses Python 2 sort/cmp idioms.
    """
    plans = ProductPlan.objects.select_related(depth=1).all()
    cps = ProducerProduct.objects.filter(
        inventoried=False,
        default_avail_qty__gt=0,
    )
    constants = {}
    rows = {}
    #import pdb; pdb.set_trace()
    #todo: what if some NIPs and some inventoried for same product?
    #does code does allow for that?
    for cp in cps:
        constants.setdefault(cp.product, Decimal("0"))
        constant = cp.default_avail_qty
        product = cp.product
        constants[product] += constant
        row = {}
        row["product"] = product.long_name
        row["id"] = product.id
        rows.setdefault(product, row)
        wkdate = from_date
        # Seed every week with the baseline availability (as a string).
        while wkdate <= to_date:
            row[wkdate.strftime('%Y_%m_%d')] = str(constant)
            wkdate = wkdate + datetime.timedelta(days=7)
    if member:
        plans = plans.filter(member=member)
    #todo:
    # spread storage items over many weeks
    # if plan.product expiration_days > 1 week:
    #   spread remainder over weeks until consumed or expired.
    #   means plannable parents cd determine expiration.
    #   may require another pass thru storage plans...
    for plan in plans:
        wkdate = from_date
        #this is too slow:
        #product = plan.product.supply_demand_product()
        product = plan.product
        #constant = Decimal('0')
        #constant = ""
        #cp = constants.get(product)
        #if cp:
        #    constant = str(cp)
        row = {}
        #while wkdate <= to_date:
        #    row[wkdate.strftime('%Y_%m_%d')] = str(constant)
        #    wkdate = wkdate + datetime.timedelta(days=7)
        row["product"] = product.long_name
        row["id"] = product.id
        rows.setdefault(product, row)
        #import pdb; pdb.set_trace()
        wkdate = from_date
        while wkdate <= to_date:
            if plan.from_date <= wkdate and plan.to_date >= wkdate:
                key = wkdate.strftime('%Y_%m_%d')
                # Parse the existing string value back to Decimal (missing
                # or empty week counts as zero), adjust, and re-stringify.
                try:
                    value = rows[product][key]
                except KeyError:
                    value = Decimal("0")
                if value == "":
                    value = Decimal("0")
                else:
                    value = Decimal(value)
                if plan.role == "producer":
                    value += plan.quantity
                else:
                    value -= plan.quantity
                rows[product][key] = str(value)
            wkdate = wkdate + datetime.timedelta(days=7)
    rows = rows.values()
    rows.sort(lambda x, y: cmp(x["product"], y["product"]))
    return rows
def supply_demand_weekly_table(week_date):
    """Build a one-week supply/demand table with one column per member.

    Columns are [label, member..., "Balance"]; each row is
    [product, qty per member..., net balance].  Producer plans (and
    non-inventoried producer-product baselines) add; consumer plans
    subtract.  Uses Python 2 sort/cmp idioms.
    """
    plans = ProductPlan.objects.filter(
        from_date__lte=week_date,
        to_date__gte=week_date,
    ).order_by("-role", "member__short_name")
    columns = []
    rows = {}
    cps = ProducerProduct.objects.filter(
        inventoried=False,
        default_avail_qty__gt=0,
    )
    # Collect the distinct member columns before sizing any rows.
    for cp in cps:
        if not cp.producer in columns:
            columns.append(cp.producer)
    for plan in plans:
        if not plan.member in columns:
            columns.append(plan.member)
    columns.insert(0, "Product\Member")
    columns.append("Balance")
    for cp in cps:
        if not rows.get(cp.product):
            # New row: zero-fill one cell per member plus the balance cell.
            row = []
            for i in range(0, len(columns)-1):
                row.append(Decimal("0"))
            row.insert(0, cp.product)
            rows[cp.product] = row
        rows[cp.product][columns.index(cp.producer)] += cp.default_avail_qty
        rows[cp.product][len(columns)-1] += cp.default_avail_qty
    for plan in plans:
        if not rows.get(plan.product):
            row = []
            for i in range(0, len(columns)-1):
                row.append(Decimal("0"))
            row.insert(0, plan.product)
            rows[plan.product] = row
        if plan.role == "producer":
            rows[plan.product][columns.index(plan.member)] += plan.quantity
            rows[plan.product][len(columns)-1] += plan.quantity
        else:
            rows[plan.product][columns.index(plan.member)] -= plan.quantity
            rows[plan.product][len(columns)-1] -= plan.quantity
    rows = rows.values()
    rows.sort(lambda x, y: cmp(x[0].short_name, y[0].short_name))
    sdtable = SupplyDemandTable(columns, rows)
    return sdtable
def dojo_supply_demand_weekly_table(week_date):
    """Dojo-grid variant of :func:`supply_demand_weekly_table`.

    Rows are dicts keyed by member short name (int quantities), plus
    ``product``, ``id`` and ``Balance``.  Producer plans add, consumer
    plans subtract; quantities are truncated to int for the grid.
    """
    plans = ProductPlan.objects.filter(
        from_date__lte=week_date,
        to_date__gte=week_date,
    ).order_by("-role", "member__short_name")
    # for columns: product, member.short_name(s), balance
    # but only members are needed here...product and balance can be added in
    # template
    # for rows: dictionaries with the above keys
    columns = []
    rows = {}
    cps = ProducerProduct.objects.filter(
        inventoried=False,
        default_avail_qty__gt=0,
    )
    for cp in cps:
        if not cp.producer in columns:
            columns.append(cp.producer.short_name)
    for plan in plans:
        if not plan.member.short_name in columns:
            columns.append(plan.member.short_name)
    columns.append("Balance")
    for cp in cps:
        if not rows.get(cp.product):
            # New row: one zeroed cell per member column plus metadata.
            row = {}
            for column in columns:
                row[column] = 0
            row["product"] = cp.product.long_name
            row["id"] = cp.product.id
            row["Balance"] = 0
            rows[cp.product] = row
        rows[cp.product][cp.producer.short_name] += int(cp.default_avail_qty)
        rows[cp.product]["Balance"] += int(cp.default_avail_qty)
    for plan in plans:
        if not rows.get(plan.product):
            row = {}
            for column in columns:
                row[column] = 0
            row["product"] = plan.product.long_name
            row["id"] = plan.product.id
            row["Balance"] = 0
            rows[plan.product] = row
        if plan.role == "producer":
            rows[plan.product][plan.member.short_name] += int(plan.quantity)
            rows[plan.product]["Balance"] += int(plan.quantity)
        else:
            rows[plan.product][plan.member.short_name] -= int(plan.quantity)
            rows[plan.product]["Balance"] -= int(plan.quantity)
    rows = rows.values()
    rows.sort(lambda x, y: cmp(x["product"], y["product"]))
    sdtable = SupplyDemandTable(columns, rows)
    return sdtable
class SuppliableDemandCell(object):
    """One week's accumulated supply and demand for a product."""

    def __init__(self, supply, demand):
        self.supply = supply
        self.demand = demand

    def suppliable(self):
        """Return the fillable quantity: zero unless both supply and
        demand are non-zero, otherwise the smaller of the two."""
        if self.supply and self.demand:
            return min(self.supply, self.demand)
        return Decimal("0")
def suppliable_demand(from_date, to_date, member=None):
    """Project weekly income from demand that can actually be supplied.

    For each planned product, supply and demand are accumulated per week;
    the suppliable quantity (see SuppliableDemandCell.suppliable) is priced,
    marked up by the customer fee, and totalled per row.  Rows with no
    income are dropped.

    :param from_date: first week start date (inclusive)
    :param to_date: last week start date (inclusive)
    :param member: optional party; restrict plans to that member
    :return: SupplyDemandTable of income rows with Total and Net columns
    """
    plans = ProductPlan.objects.all()
    if member:
        plans = plans.filter(member=member)
    rows = {}
    for plan in plans:
        # Seed a row with one empty supply/demand cell per week.
        wkdate = from_date
        row = []
        while wkdate <= to_date:
            row.append(SuppliableDemandCell(Decimal("0"), Decimal("0")))
            wkdate = wkdate + datetime.timedelta(days=7)
        product = plan.product.supply_demand_product()
        row.insert(0, product)
        rows.setdefault(product, row)
        wkdate = from_date
        week = 0
        # Accumulate the plan into each week it covers.
        while wkdate <= to_date:
            if plan.from_date <= wkdate and plan.to_date >= wkdate:
                if plan.role == "producer":
                    rows[product][week + 1].supply += plan.quantity
                else:
                    rows[product][week + 1].demand += plan.quantity
            wkdate = wkdate + datetime.timedelta(days=7)
            week += 1
    rows = rows.values()
    cust_fee = customer_fee()
    # Convert each cell from a supply/demand pair to gross income.
    for row in rows:
        for x in range(1, len(row)):
            sd = row[x].suppliable()
            if sd >= 0:
                income = sd * row[0].price
                row[x] = income
            else:
                row[x] = Decimal("0")
    income_rows = []
    for row in rows:
        base = Decimal("0")
        total = Decimal("0")
        for x in range(1, len(row)):
            cell = row[x]
            base += cell
            # Customer-facing amount includes the customer fee markup.
            cell += cell * cust_fee
            total += cell
            row[x] = cell.quantize(Decimal('.1'), rounding=ROUND_UP)
        if total:
            net = base * cust_fee + (base * producer_fee())
            net = net.quantize(Decimal('1.'), rounding=ROUND_UP)
            total = total.quantize(Decimal('1.'), rounding=ROUND_UP)
            row.append(total)
            row.append(net)
            income_rows.append(row)
    label = "Item\Weeks"
    columns = [label]
    wkdate = from_date
    while wkdate <= to_date:
        columns.append(wkdate)
        wkdate = wkdate + datetime.timedelta(days=7)
    columns.append("Total")
    columns.append("Net")
    # Bug fix: both comparands now use long_name (was long_name vs
    # short_name, which produced an incoherent ordering).
    income_rows.sort(lambda x, y: cmp(x[0].long_name, y[0].long_name))
    sdtable = SupplyDemandTable(columns, income_rows)
    return sdtable
#todo: does not use contants (NIPs)
#or correct logic for storage items
def json_income_rows(from_date, to_date, member=None):
    """Dojo-grid variant of :func:`suppliable_demand`.

    Returns a list of dicts per product with 'YYYY_MM_DD' week keys holding
    income strings, plus ``product``, ``id``, ``price``, ``total`` and
    ``net``.  Rows with zero total are dropped.  Decimal amounts are
    stringified for JSON transport.
    """
    #import pdb; pdb.set_trace()
    plans = ProductPlan.objects.all()
    if member:
        plans = plans.filter(member=member)
    rows = {}
    for plan in plans:
        wkdate = from_date
        row = {}
        # Seed one empty supply/demand cell per week key.
        while wkdate <= to_date:
            row[wkdate.strftime('%Y_%m_%d')] = SuppliableDemandCell(Decimal("0"), Decimal("0"))
            wkdate = wkdate + datetime.timedelta(days=7)
        product = plan.product.supply_demand_product()
        row["product"] = product.long_name
        row["id"] = product.id
        row["price"] = product.price
        rows.setdefault(product, row)
        wkdate = from_date
        while wkdate <= to_date:
            key = wkdate.strftime('%Y_%m_%d')
            if plan.from_date <= wkdate and plan.to_date >= wkdate:
                if plan.role == "producer":
                    rows[product][key].supply += plan.quantity
                else:
                    rows[product][key].demand += plan.quantity
            wkdate = wkdate + datetime.timedelta(days=7)
    rows = rows.values()
    cust_fee = customer_fee()
    #import pdb; pdb.set_trace()
    # Convert each cell from a supply/demand pair to gross income.
    for row in rows:
        wkdate = from_date
        while wkdate <= to_date:
            key = wkdate.strftime('%Y_%m_%d')
            sd = row[key].suppliable()
            if sd > 0:
                income = sd * row["price"]
                row[key] = income
            else:
                row[key] = Decimal("0")
            wkdate = wkdate + datetime.timedelta(days=7)
    income_rows = []
    for row in rows:
        base = Decimal("0")
        total = Decimal("0")
        wkdate = from_date
        while wkdate <= to_date:
            key = wkdate.strftime('%Y_%m_%d')
            cell = row[key]
            base += cell
            # Customer-facing amount includes the customer fee markup.
            cell += cell * cust_fee
            total += cell
            row[key] = str(cell.quantize(Decimal('.1'), rounding=ROUND_UP))
            wkdate = wkdate + datetime.timedelta(days=7)
        if total:
            net = base * cust_fee + (base * producer_fee())
            net = net.quantize(Decimal('1.'), rounding=ROUND_UP)
            total = total.quantize(Decimal('1.'), rounding=ROUND_UP)
            row["total"] = str(total)
            row["net"] = str(net)
            row["price"] = str(row["price"])
            income_rows.append(row)
    income_rows.sort(lambda x, y: cmp(x["product"], y["product"]))
    return income_rows
class PlannedWeek(object):
    """One editable week cell in a member's planning grid.

    ``plan`` starts as None and is filled in later when a persisted
    ProductPlan covers this week.
    """

    def __init__(self, product, from_date, to_date, quantity):
        self.product = product
        self.from_date, self.to_date = from_date, to_date
        self.quantity = quantity
        self.plan = None
def plan_weeks(member, products, from_date, to_date):
    """Build a per-product planning grid of PlannedWeek cells for *member*.

    Each row is [product, PlannedWeek, PlannedWeek, ...], one cell per week
    from *from_date* to *to_date*; cells covered by an existing ProductPlan
    get that plan's quantity and a reference to the plan.

    :param products: iterable of products or member-product records
        (anything with either a ``product`` attribute or being a product)
    :return: SupplyDemandTable of rows sorted by product short name
    """
    plans = ProductPlan.objects.filter(member=member)
    #if member.is_customer():
    #    products = CustomerProduct.objects.filter(customer=member, planned=True)
    #else:
    #    products = ProducerProduct.objects.filter(producer=member, planned=True)
    #if not products:
    #    products = Product.objects.filter(plannable=True)
    rows = {}
    for pp in products:
        # Accept either a member-product record or a bare product.
        try:
            product = pp.product
        except:
            product = pp
        wkdate = from_date
        row = [product]
        while wkdate <= to_date:
            enddate = wkdate + datetime.timedelta(days=6)
            row.append(PlannedWeek(product, wkdate, enddate, Decimal("0")))
            wkdate = enddate + datetime.timedelta(days=1)
        #row.insert(0, product)
        rows.setdefault(product, row)
    for plan in plans:
        product = plan.product
        wkdate = from_date
        week = 0
        # Copy the plan's quantity into every week cell it covers.
        while wkdate <= to_date:
            enddate = wkdate + datetime.timedelta(days=6)
            if plan.from_date <= wkdate and plan.to_date >= wkdate:
                rows[product][week + 1].quantity = plan.quantity
                rows[product][week + 1].plan = plan
            wkdate = wkdate + datetime.timedelta(days=7)
            week += 1
    label = "Product/Weeks"
    columns = [label]
    wkdate = from_date
    while wkdate <= to_date:
        columns.append(wkdate)
        wkdate = wkdate + datetime.timedelta(days=7)
    rows = rows.values()
    rows.sort(lambda x, y: cmp(x[0].short_name, y[0].short_name))
    sdtable = SupplyDemandTable(columns, rows)
    return sdtable
def plans_for_dojo(member, products, from_date, to_date):
    """Dojo-grid variant of :func:`plan_weeks`.

    Returns one dict per product with week-start keys ('YYYY-MM-DD')
    holding quantity strings, a ``<week>:plan_id`` key for weeks backed by
    a persisted plan, plus ``product``, ``yearly``, ``id``, ``member_id``
    and the date range.
    """
    #import pdb; pdb.set_trace()
    plans = ProductPlan.objects.filter(member=member)
    rows = {}
    for pp in products:
        yearly = 0
        # Accept either a member-product record or a bare product.
        try:
            product = pp.product
            yearly = pp.default_quantity
        except:
            product = pp
        if not yearly:
            # Fall back to the producer's own default quantity, if any.
            try:
                pp = ProducerProduct.objects.get(producer=member, product=product)
                yearly = pp.default_quantity
            except:
                pass
        wkdate = from_date
        row = {}
        row["product"] = product.long_name
        row["yearly"] = int(yearly)
        row["id"] = product.id
        row["member_id"] = member.id
        row["from_date"] = from_date.strftime('%Y-%m-%d')
        row["to_date"] = to_date.strftime('%Y-%m-%d')
        while wkdate <= to_date:
            enddate = wkdate + datetime.timedelta(days=6)
            row[wkdate.strftime('%Y-%m-%d')] = "0"
            wkdate = enddate + datetime.timedelta(days=1)
        rows.setdefault(product, row)
    #import pdb; pdb.set_trace()
    for plan in plans:
        product = plan.product
        wkdate = from_date
        week = 0
        while wkdate <= to_date:
            enddate = wkdate + datetime.timedelta(days=6)
            if plan.from_date <= wkdate and plan.to_date >= wkdate:
                rows[product][wkdate.strftime('%Y-%m-%d')] = str(plan.quantity)
                rows[product][":".join([wkdate.strftime('%Y-%m-%d'), "plan_id"])] = plan.id
            wkdate = wkdate + datetime.timedelta(days=7)
            week += 1
    rows = rows.values()
    rows.sort(lambda x, y: cmp(x["product"], y["product"]))
    return rows
def create_all_inventory_item_forms(avail_date, plans, items, data=None):
    """Build one AllInventoryItemForm per plan for the avail_date week.

    Existing inventory items are matched to plans by (product, member); a
    matched plan edits that item, an unmatched plan gets a fresh form with
    zeroed quantities and an expiration date derived from the product.

    :param avail_date: availability date used for new items
    :param plans: plans (or producer-product records) to build forms for
    :param items: existing InventoryItems for the same period
    :param data: optional POST data to bind the forms
    :return: forms sorted by producer then product description
    """
    item_dict = {}
    for item in items:
        # This means one lot per producer per product per week
        item_dict["-".join([str(item.product.id), str(item.producer.id)])] = item
    form_list = []
    for plan in plans:
        #import pdb; pdb.set_trace()
        custodian_id = ""
        # Plans carry .member; producer-product records carry .producer.
        try:
            member = plan.member
        except:
            member = plan.producer
        try:
            item = item_dict["-".join([str(plan.product.id),
                str(member.id)])]
            if item.custodian:
                custodian_id = item.custodian.id
        except KeyError:
            item = False
        try:
            plan_qty = plan.quantity
        except:
            plan_qty = 0
        #import pdb; pdb.set_trace()
        if item:
            # Edit form for the existing inventory item.
            pref = "-".join(["item", str(item.id)])
            the_form = AllInventoryItemForm(data, prefix=pref, initial={
                'item_id': item.id,
                'product_id': item.product.id,
                'producer_id': item.producer.id,
                'freeform_lot_id': item.freeform_lot_id,
                'field_id': item.field_id,
                'custodian': custodian_id,
                'inventory_date': item.inventory_date,
                'expiration_date': item.expiration_date,
                'planned': item.planned,
                'received': item.received,
                'notes': item.notes})
        else:
            # Blank form for a plan with no inventory item yet.
            pref = "-".join(["plan", str(plan.id)])
            expiration_date = avail_date + datetime.timedelta(days=plan.product.expiration_days)
            the_form = AllInventoryItemForm(data, prefix=pref, initial={
                'item_id': 0,
                'product_id': plan.product.id,
                'producer_id': member.id,
                'inventory_date': avail_date,
                'expiration_date': expiration_date,
                'planned': 0,
                'received': 0,
                'notes': ''})
        the_form.description = plan.product.long_name
        the_form.producer = member.short_name
        the_form.plan_qty = plan_qty
        form_list.append(the_form)
    #import pdb; pdb.set_trace()
    #form_list.sort(lambda x, y: cmp(x.producer, y.producer))
    form_list = sorted(form_list, key=attrgetter('producer', 'description'))
    return form_list
def create_delivery_cycle_selection_forms(data=None):
    """Return one DeliveryCycleSelectionForm per DeliveryCycle, each
    annotated with its cycle and the cycle's next delivery date."""
    forms = []
    for cycle in DeliveryCycle.objects.all():
        selection_form = DeliveryCycleSelectionForm(data, prefix=cycle.id)
        selection_form.cycle = cycle
        selection_form.delivery_date = cycle.next_delivery_date_using_closing()
        forms.append(selection_form)
    return forms
def create_avail_item_forms(avail_date, data=None):
    """Build an AvailableItemForm for every item the food network can offer
    customers on *avail_date*, sorted by product then producer."""
    network = food_network()
    avail_forms = []
    for item in network.avail_items_for_customer(avail_date):
        prefix = "-".join(["item", str(item.id)])
        item_form = AvailableItemForm(data, prefix=prefix, initial={
            'item_id': item.id,
            'inventory_date': item.inventory_date,
            'expiration_date': item.expiration_date,
            'quantity': item.avail_qty(),
        })
        # Annotations used by the template alongside the form fields.
        item_form.description = item.product.long_name
        item_form.producer = item.producer.short_name
        item_form.ordered = item.product.total_ordered_for_timespan(
            item.inventory_date, item.expiration_date)
        avail_forms.append(item_form)
    return sorted(avail_forms, key=attrgetter('description', 'producer'))
def send_avail_emails(cycle):
    """Email the fresh/availability list for *cycle*'s next delivery date.

    Recipients are every customer on the delivery cycle, each customer
    contact with a distinct email address, and the food network itself
    (de-duplicated via set()).

    :param cycle: DeliveryCycle whose next delivery is being announced
    """
    if notification is None:
        # The notification app is optional (see the guarded import at module
        # top); without it there is no way to send, so quietly do nothing.
        return
    fn = food_network()
    food_network_name = fn.long_name
    delivery_date = cycle.next_delivery_date_using_closing()
    fresh_list = fn.email_availability(delivery_date)
    users = []
    for customer in cycle.customers.all():
        users.append(customer)
        for contact in customer.contacts.all():
            # Skip contacts sharing the customer's address to avoid
            # double-sending.
            if contact.email != customer.email:
                users.append(contact)
    users.append(fn)
    users = list(set(users))
    intro = avail_email_intro()
    domain = Site.objects.get_current().domain
    notification.send(users, "distribution_fresh_list", {
        "intro": intro.message,
        "domain": domain,
        "fresh_list": fresh_list,
        "delivery_date": delivery_date,
        "food_network_name": food_network_name,
        "cycle": cycle,
    })
| bhaugen/foodnetwork | distribution/view_helpers.py | Python | mit | 25,038 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1.types import batch_prediction_job
from google.cloud.aiplatform_v1.types import (
batch_prediction_job as gca_batch_prediction_job,
)
from google.cloud.aiplatform_v1.types import custom_job
from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
from google.cloud.aiplatform_v1.types import data_labeling_job
from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
from google.cloud.aiplatform_v1.types import (
hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
)
from google.cloud.aiplatform_v1.types import job_service
from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job
from google.cloud.aiplatform_v1.types import (
model_deployment_monitoring_job as gca_model_deployment_monitoring_job,
)
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import JobServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import JobServiceGrpcTransport
class JobServiceGrpcAsyncIOTransport(JobServiceTransport):
"""gRPC AsyncIO backend transport for JobService.
A service for creating and managing Vertex AI's jobs.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
    def __init__(
        self,
        *,
        host: str = "aiplatform.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[aio.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        # Per-RPC stub cache, populated lazily by the stub properties below.
        self._stubs: Dict[str, Callable] = {}
        # Operations client is created on first access (see operations_client).
        self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No pre-built channel: work out which SSL credentials to use.
            if api_mtls_endpoint:
                # Deprecated mTLS path: the mTLS endpoint replaces ``host``.
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                # Preferred mTLS path; explicit ssl_channel_credentials wins.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # Only build a channel if the caller did not supply one above.
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_custom_job(
self,
) -> Callable[
[job_service.CreateCustomJobRequest], Awaitable[gca_custom_job.CustomJob]
]:
r"""Return a callable for the create custom job method over gRPC.
Creates a CustomJob. A created CustomJob right away
will be attempted to be run.
Returns:
Callable[[~.CreateCustomJobRequest],
Awaitable[~.CustomJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_custom_job" not in self._stubs:
self._stubs["create_custom_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/CreateCustomJob",
request_serializer=job_service.CreateCustomJobRequest.serialize,
response_deserializer=gca_custom_job.CustomJob.deserialize,
)
return self._stubs["create_custom_job"]
@property
def get_custom_job(
self,
) -> Callable[[job_service.GetCustomJobRequest], Awaitable[custom_job.CustomJob]]:
r"""Return a callable for the get custom job method over gRPC.
Gets a CustomJob.
Returns:
Callable[[~.GetCustomJobRequest],
Awaitable[~.CustomJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_custom_job" not in self._stubs:
self._stubs["get_custom_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/GetCustomJob",
request_serializer=job_service.GetCustomJobRequest.serialize,
response_deserializer=custom_job.CustomJob.deserialize,
)
return self._stubs["get_custom_job"]
@property
def list_custom_jobs(
self,
) -> Callable[
[job_service.ListCustomJobsRequest],
Awaitable[job_service.ListCustomJobsResponse],
]:
r"""Return a callable for the list custom jobs method over gRPC.
Lists CustomJobs in a Location.
Returns:
Callable[[~.ListCustomJobsRequest],
Awaitable[~.ListCustomJobsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_custom_jobs" not in self._stubs:
self._stubs["list_custom_jobs"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/ListCustomJobs",
request_serializer=job_service.ListCustomJobsRequest.serialize,
response_deserializer=job_service.ListCustomJobsResponse.deserialize,
)
return self._stubs["list_custom_jobs"]
@property
def delete_custom_job(
self,
) -> Callable[
[job_service.DeleteCustomJobRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete custom job method over gRPC.
Deletes a CustomJob.
Returns:
Callable[[~.DeleteCustomJobRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_custom_job" not in self._stubs:
self._stubs["delete_custom_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/DeleteCustomJob",
request_serializer=job_service.DeleteCustomJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_custom_job"]
@property
def cancel_custom_job(
self,
) -> Callable[[job_service.CancelCustomJobRequest], Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the cancel custom job method over gRPC.
Cancels a CustomJob. Starts asynchronous cancellation on the
CustomJob. The server makes a best effort to cancel the job, but
success is not guaranteed. Clients can use
[JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]
or other methods to check whether the cancellation succeeded or
whether the job completed despite cancellation. On successful
cancellation, the CustomJob is not deleted; instead it becomes a
job with a
[CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error]
value with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``, and
[CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is
set to ``CANCELLED``.
Returns:
Callable[[~.CancelCustomJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_custom_job" not in self._stubs:
self._stubs["cancel_custom_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/CancelCustomJob",
request_serializer=job_service.CancelCustomJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["cancel_custom_job"]
@property
def create_data_labeling_job(
self,
) -> Callable[
[job_service.CreateDataLabelingJobRequest],
Awaitable[gca_data_labeling_job.DataLabelingJob],
]:
r"""Return a callable for the create data labeling job method over gRPC.
Creates a DataLabelingJob.
Returns:
Callable[[~.CreateDataLabelingJobRequest],
Awaitable[~.DataLabelingJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_data_labeling_job" not in self._stubs:
self._stubs["create_data_labeling_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob",
request_serializer=job_service.CreateDataLabelingJobRequest.serialize,
response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize,
)
return self._stubs["create_data_labeling_job"]
@property
def get_data_labeling_job(
self,
) -> Callable[
[job_service.GetDataLabelingJobRequest],
Awaitable[data_labeling_job.DataLabelingJob],
]:
r"""Return a callable for the get data labeling job method over gRPC.
Gets a DataLabelingJob.
Returns:
Callable[[~.GetDataLabelingJobRequest],
Awaitable[~.DataLabelingJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_data_labeling_job" not in self._stubs:
self._stubs["get_data_labeling_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob",
request_serializer=job_service.GetDataLabelingJobRequest.serialize,
response_deserializer=data_labeling_job.DataLabelingJob.deserialize,
)
return self._stubs["get_data_labeling_job"]
@property
def list_data_labeling_jobs(
self,
) -> Callable[
[job_service.ListDataLabelingJobsRequest],
Awaitable[job_service.ListDataLabelingJobsResponse],
]:
r"""Return a callable for the list data labeling jobs method over gRPC.
Lists DataLabelingJobs in a Location.
Returns:
Callable[[~.ListDataLabelingJobsRequest],
Awaitable[~.ListDataLabelingJobsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_data_labeling_jobs" not in self._stubs:
self._stubs["list_data_labeling_jobs"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs",
request_serializer=job_service.ListDataLabelingJobsRequest.serialize,
response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize,
)
return self._stubs["list_data_labeling_jobs"]
@property
def delete_data_labeling_job(
self,
) -> Callable[
[job_service.DeleteDataLabelingJobRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete data labeling job method over gRPC.
Deletes a DataLabelingJob.
Returns:
Callable[[~.DeleteDataLabelingJobRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_data_labeling_job" not in self._stubs:
self._stubs["delete_data_labeling_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob",
request_serializer=job_service.DeleteDataLabelingJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_data_labeling_job"]
@property
def cancel_data_labeling_job(
self,
) -> Callable[
[job_service.CancelDataLabelingJobRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the cancel data labeling job method over gRPC.
Cancels a DataLabelingJob. Success of cancellation is
not guaranteed.
Returns:
Callable[[~.CancelDataLabelingJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_data_labeling_job" not in self._stubs:
self._stubs["cancel_data_labeling_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob",
request_serializer=job_service.CancelDataLabelingJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["cancel_data_labeling_job"]
@property
def create_hyperparameter_tuning_job(
self,
) -> Callable[
[job_service.CreateHyperparameterTuningJobRequest],
Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob],
]:
r"""Return a callable for the create hyperparameter tuning
job method over gRPC.
Creates a HyperparameterTuningJob
Returns:
Callable[[~.CreateHyperparameterTuningJobRequest],
Awaitable[~.HyperparameterTuningJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_hyperparameter_tuning_job" not in self._stubs:
self._stubs[
"create_hyperparameter_tuning_job"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob",
request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize,
response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize,
)
return self._stubs["create_hyperparameter_tuning_job"]
@property
def get_hyperparameter_tuning_job(
self,
) -> Callable[
[job_service.GetHyperparameterTuningJobRequest],
Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob],
]:
r"""Return a callable for the get hyperparameter tuning job method over gRPC.
Gets a HyperparameterTuningJob
Returns:
Callable[[~.GetHyperparameterTuningJobRequest],
Awaitable[~.HyperparameterTuningJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_hyperparameter_tuning_job" not in self._stubs:
self._stubs[
"get_hyperparameter_tuning_job"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob",
request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize,
response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize,
)
return self._stubs["get_hyperparameter_tuning_job"]
@property
def list_hyperparameter_tuning_jobs(
self,
) -> Callable[
[job_service.ListHyperparameterTuningJobsRequest],
Awaitable[job_service.ListHyperparameterTuningJobsResponse],
]:
r"""Return a callable for the list hyperparameter tuning
jobs method over gRPC.
Lists HyperparameterTuningJobs in a Location.
Returns:
Callable[[~.ListHyperparameterTuningJobsRequest],
Awaitable[~.ListHyperparameterTuningJobsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_hyperparameter_tuning_jobs" not in self._stubs:
self._stubs[
"list_hyperparameter_tuning_jobs"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs",
request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize,
response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize,
)
return self._stubs["list_hyperparameter_tuning_jobs"]
@property
def delete_hyperparameter_tuning_job(
self,
) -> Callable[
[job_service.DeleteHyperparameterTuningJobRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete hyperparameter tuning
job method over gRPC.
Deletes a HyperparameterTuningJob.
Returns:
Callable[[~.DeleteHyperparameterTuningJobRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_hyperparameter_tuning_job" not in self._stubs:
self._stubs[
"delete_hyperparameter_tuning_job"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob",
request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_hyperparameter_tuning_job"]
@property
def cancel_hyperparameter_tuning_job(
self,
) -> Callable[
[job_service.CancelHyperparameterTuningJobRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the cancel hyperparameter tuning
job method over gRPC.
Cancels a HyperparameterTuningJob. Starts asynchronous
cancellation on the HyperparameterTuningJob. The server makes a
best effort to cancel the job, but success is not guaranteed.
Clients can use
[JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]
or other methods to check whether the cancellation succeeded or
whether the job completed despite cancellation. On successful
cancellation, the HyperparameterTuningJob is not deleted;
instead it becomes a job with a
[HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error]
value with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``, and
[HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
is set to ``CANCELLED``.
Returns:
Callable[[~.CancelHyperparameterTuningJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_hyperparameter_tuning_job" not in self._stubs:
self._stubs[
"cancel_hyperparameter_tuning_job"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob",
request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["cancel_hyperparameter_tuning_job"]
@property
def create_batch_prediction_job(
self,
) -> Callable[
[job_service.CreateBatchPredictionJobRequest],
Awaitable[gca_batch_prediction_job.BatchPredictionJob],
]:
r"""Return a callable for the create batch prediction job method over gRPC.
Creates a BatchPredictionJob. A BatchPredictionJob
once created will right away be attempted to start.
Returns:
Callable[[~.CreateBatchPredictionJobRequest],
Awaitable[~.BatchPredictionJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_batch_prediction_job" not in self._stubs:
self._stubs["create_batch_prediction_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob",
request_serializer=job_service.CreateBatchPredictionJobRequest.serialize,
response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize,
)
return self._stubs["create_batch_prediction_job"]
@property
def get_batch_prediction_job(
self,
) -> Callable[
[job_service.GetBatchPredictionJobRequest],
Awaitable[batch_prediction_job.BatchPredictionJob],
]:
r"""Return a callable for the get batch prediction job method over gRPC.
Gets a BatchPredictionJob
Returns:
Callable[[~.GetBatchPredictionJobRequest],
Awaitable[~.BatchPredictionJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_batch_prediction_job" not in self._stubs:
self._stubs["get_batch_prediction_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob",
request_serializer=job_service.GetBatchPredictionJobRequest.serialize,
response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize,
)
return self._stubs["get_batch_prediction_job"]
@property
def list_batch_prediction_jobs(
self,
) -> Callable[
[job_service.ListBatchPredictionJobsRequest],
Awaitable[job_service.ListBatchPredictionJobsResponse],
]:
r"""Return a callable for the list batch prediction jobs method over gRPC.
Lists BatchPredictionJobs in a Location.
Returns:
Callable[[~.ListBatchPredictionJobsRequest],
Awaitable[~.ListBatchPredictionJobsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_batch_prediction_jobs" not in self._stubs:
self._stubs["list_batch_prediction_jobs"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs",
request_serializer=job_service.ListBatchPredictionJobsRequest.serialize,
response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize,
)
return self._stubs["list_batch_prediction_jobs"]
@property
def delete_batch_prediction_job(
self,
) -> Callable[
[job_service.DeleteBatchPredictionJobRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete batch prediction job method over gRPC.
Deletes a BatchPredictionJob. Can only be called on
jobs that already finished.
Returns:
Callable[[~.DeleteBatchPredictionJobRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_batch_prediction_job" not in self._stubs:
self._stubs["delete_batch_prediction_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob",
request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_batch_prediction_job"]
@property
def cancel_batch_prediction_job(
self,
) -> Callable[
[job_service.CancelBatchPredictionJobRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the cancel batch prediction job method over gRPC.
Cancels a BatchPredictionJob.
Starts asynchronous cancellation on the BatchPredictionJob. The
server makes the best effort to cancel the job, but success is
not guaranteed. Clients can use
[JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
or other methods to check whether the cancellation succeeded or
whether the job completed despite cancellation. On a successful
cancellation, the BatchPredictionJob is not deleted;instead its
[BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
is set to ``CANCELLED``. Any files already outputted by the job
are not deleted.
Returns:
Callable[[~.CancelBatchPredictionJobRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_batch_prediction_job" not in self._stubs:
self._stubs["cancel_batch_prediction_job"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob",
request_serializer=job_service.CancelBatchPredictionJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["cancel_batch_prediction_job"]
@property
def create_model_deployment_monitoring_job(
self,
) -> Callable[
[job_service.CreateModelDeploymentMonitoringJobRequest],
Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob],
]:
r"""Return a callable for the create model deployment
monitoring job method over gRPC.
Creates a ModelDeploymentMonitoringJob. It will run
periodically on a configured interval.
Returns:
Callable[[~.CreateModelDeploymentMonitoringJobRequest],
Awaitable[~.ModelDeploymentMonitoringJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_model_deployment_monitoring_job" not in self._stubs:
self._stubs[
"create_model_deployment_monitoring_job"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/CreateModelDeploymentMonitoringJob",
request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize,
response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize,
)
return self._stubs["create_model_deployment_monitoring_job"]
@property
def search_model_deployment_monitoring_stats_anomalies(
self,
) -> Callable[
[job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest],
Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse],
]:
r"""Return a callable for the search model deployment
monitoring stats anomalies method over gRPC.
Searches Model Monitoring Statistics generated within
a given time window.
Returns:
Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest],
Awaitable[~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search_model_deployment_monitoring_stats_anomalies" not in self._stubs:
self._stubs[
"search_model_deployment_monitoring_stats_anomalies"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/SearchModelDeploymentMonitoringStatsAnomalies",
request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize,
response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize,
)
return self._stubs["search_model_deployment_monitoring_stats_anomalies"]
@property
def get_model_deployment_monitoring_job(
self,
) -> Callable[
[job_service.GetModelDeploymentMonitoringJobRequest],
Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob],
]:
r"""Return a callable for the get model deployment
monitoring job method over gRPC.
Gets a ModelDeploymentMonitoringJob.
Returns:
Callable[[~.GetModelDeploymentMonitoringJobRequest],
Awaitable[~.ModelDeploymentMonitoringJob]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_model_deployment_monitoring_job" not in self._stubs:
self._stubs[
"get_model_deployment_monitoring_job"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/GetModelDeploymentMonitoringJob",
request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize,
response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize,
)
return self._stubs["get_model_deployment_monitoring_job"]
@property
def list_model_deployment_monitoring_jobs(
self,
) -> Callable[
[job_service.ListModelDeploymentMonitoringJobsRequest],
Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse],
]:
r"""Return a callable for the list model deployment
monitoring jobs method over gRPC.
Lists ModelDeploymentMonitoringJobs in a Location.
Returns:
Callable[[~.ListModelDeploymentMonitoringJobsRequest],
Awaitable[~.ListModelDeploymentMonitoringJobsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_model_deployment_monitoring_jobs" not in self._stubs:
self._stubs[
"list_model_deployment_monitoring_jobs"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/ListModelDeploymentMonitoringJobs",
request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize,
response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize,
)
return self._stubs["list_model_deployment_monitoring_jobs"]
@property
def update_model_deployment_monitoring_job(
self,
) -> Callable[
[job_service.UpdateModelDeploymentMonitoringJobRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the update model deployment
monitoring job method over gRPC.
Updates a ModelDeploymentMonitoringJob.
Returns:
Callable[[~.UpdateModelDeploymentMonitoringJobRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_model_deployment_monitoring_job" not in self._stubs:
self._stubs[
"update_model_deployment_monitoring_job"
] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.JobService/UpdateModelDeploymentMonitoringJob",
request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_model_deployment_monitoring_job"]
@property
def delete_model_deployment_monitoring_job(
    self,
) -> Callable[
    [job_service.DeleteModelDeploymentMonitoringJobRequest],
    Awaitable[operations_pb2.Operation],
]:
    r"""Return a callable for the delete model deployment
    monitoring job method over gRPC.

    Deletes a ModelDeploymentMonitoringJob.

    Returns:
        Callable[[~.DeleteModelDeploymentMonitoringJobRequest],
            Awaitable[~.Operation]]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Lazily create and cache the stub; subsequent accesses reuse it.
    stub_name = "delete_model_deployment_monitoring_job"
    if stub_name not in self._stubs:
        self._stubs[stub_name] = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1.JobService/DeleteModelDeploymentMonitoringJob",
            request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize,
            response_deserializer=operations_pb2.Operation.FromString,
        )
    return self._stubs[stub_name]
@property
def pause_model_deployment_monitoring_job(
    self,
) -> Callable[
    [job_service.PauseModelDeploymentMonitoringJobRequest],
    Awaitable[empty_pb2.Empty],
]:
    r"""Return a callable for the pause model deployment
    monitoring job method over gRPC.

    Pauses a ModelDeploymentMonitoringJob. If the job is running,
    the server makes a best effort to cancel the job. Will mark
    [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state]
    to 'PAUSED'.

    Returns:
        Callable[[~.PauseModelDeploymentMonitoringJobRequest],
            Awaitable[~.Empty]]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Lazily create and cache the stub; subsequent accesses reuse it.
    stub_name = "pause_model_deployment_monitoring_job"
    if stub_name not in self._stubs:
        self._stubs[stub_name] = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1.JobService/PauseModelDeploymentMonitoringJob",
            request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize,
            response_deserializer=empty_pb2.Empty.FromString,
        )
    return self._stubs[stub_name]
@property
def resume_model_deployment_monitoring_job(
    self,
) -> Callable[
    [job_service.ResumeModelDeploymentMonitoringJobRequest],
    Awaitable[empty_pb2.Empty],
]:
    r"""Return a callable for the resume model deployment
    monitoring job method over gRPC.

    Resumes a paused ModelDeploymentMonitoringJob. It
    will start to run from next scheduled time. A deleted
    ModelDeploymentMonitoringJob can't be resumed.

    Returns:
        Callable[[~.ResumeModelDeploymentMonitoringJobRequest],
            Awaitable[~.Empty]]:
            A function that, when called, will call the underlying RPC
            on the server.
    """
    # Lazily create and cache the stub; subsequent accesses reuse it.
    stub_name = "resume_model_deployment_monitoring_job"
    if stub_name not in self._stubs:
        self._stubs[stub_name] = self.grpc_channel.unary_unary(
            "/google.cloud.aiplatform.v1.JobService/ResumeModelDeploymentMonitoringJob",
            request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize,
            response_deserializer=empty_pb2.Empty.FromString,
        )
    return self._stubs[stub_name]
def close(self):
    """Close the underlying gRPC channel, propagating its return value."""
    channel = self.grpc_channel
    return channel.close()
__all__ = ("JobServiceGrpcAsyncIOTransport",)
| googleapis/python-aiplatform | google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py | Python | apache-2.0 | 51,633 |
# Legacy Django app-config hook pointing at this app's AppConfig subclass.
# (Newer Django versions auto-discover a single AppConfig; this setting is
# the pre-3.2 convention.)
default_app_config = "posts.apps.PostsConfig"
| nijel/photoblog | posts/__init__.py | Python | agpl-3.0 | 46 |
import re
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import PyQt4.QtCore as QtCore
import ui_celltypedlg
import sys
import string
# True when the Qt/Mac native-menubar helper name is present in the current
# namespace — used below as a macOS indicator (presumably exposed by the
# PyQt wildcard imports above; TODO confirm on a non-mac build).
MAC = "qt_mac_set_native_menubar" in dir()
class CellTypeDlg(QDialog,ui_celltypedlg.Ui_CellTypeDlg):
#signals
# gotolineSignal = QtCore.pyqtSignal( ('int',))
def __init__(self,_currentEditor=None,parent=None):
super(CellTypeDlg, self).__init__(parent)
self.editorWindow=parent
self.setupUi(self)
if not MAC:
self.cancelPB.setFocusPolicy(Qt.NoFocus)
self.updateUi()
def keyPressEvent(self, event):
cellType=str(self.cellTypeLE.text())
cellType=string.rstrip(cellType)
if event.key()==Qt.Key_Return :
if cellType!="":
self.on_cellTypeAddPB_clicked()
event.accept()
@pyqtSignature("") # signature of the signal emited by the button
def on_cellTypeAddPB_clicked(self):
cellType=str(self.cellTypeLE.text())
cellType=string.rstrip(cellType)
rows=self.cellTypeTable.rowCount()
if cellType =="":
return
# check if cell type with this name already exist
cellTypeAlreadyExists=False
for rowId in range(rows):
name=str(self.cellTypeTable.item(rowId,0).text())
name=string.rstrip(name)
print "CHECKING name=",name+"1"," type=",cellType+"1"
print "name==cellType ",name==cellType
if name==cellType:
cellTypeAlreadyExists=True
break
print "cellTypeAlreadyExists=",cellTypeAlreadyExists
if cellTypeAlreadyExists:
print "WARNING"
QMessageBox.warning(self,"Cell type name already exists","Cell type name already exist. Please choose different name",QMessageBox.Ok)
return
self.cellTypeTable.insertRow(rows)
cellTypeItem=QTableWidgetItem(cellType)
self.cellTypeTable.setItem (rows,0, cellTypeItem)
cellTypeFreezeItem=QTableWidgetItem()
cellTypeFreezeItem.data(Qt.CheckStateRole)
if self.freezeCHB.isChecked():
cellTypeFreezeItem.setCheckState(Qt.Checked)
else:
cellTypeFreezeItem.setCheckState(Qt.Unchecked)
self.cellTypeTable.setItem (rows,1, cellTypeFreezeItem)
# reset cell type entry line
self.cellTypeLE.setText("")
return
@pyqtSignature("") # signature of the signal emited by the button
def on_clearCellTypeTablePB_clicked(self):
rows=self.cellTypeTable.rowCount()
for i in range (rows-1,-1,-1):
self.cellTypeTable.removeRow(i)
#insert Medium
self.cellTypeTable.insertRow(0)
mediumItem=QTableWidgetItem("Medium")
self.cellTypeTable.setItem (0,0, mediumItem)
mediumFreezeItem=QTableWidgetItem()
mediumFreezeItem.data(Qt.CheckStateRole)
mediumFreezeItem.setCheckState(Qt.Unchecked)
self.cellTypeTable.setItem (0,1, mediumFreezeItem)
def extractInformation(self):
cellTypeDict={}
for row in range(self.cellTypeTable.rowCount()):
type=str(self.cellTypeTable.item(row,0).text())
freeze=False
if self.cellTypeTable.item(row,1).checkState()==Qt.Checked:
print "self.cellTypeTable.item(row,1).checkState()=",self.cellTypeTable.item(row,1).checkState()
freeze=True
cellTypeDict[row]=[type,freeze]
return cellTypeDict
def updateUi(self):
self.cellTypeTable.insertRow(0)
mediumItem=QTableWidgetItem("Medium")
self.cellTypeTable.setItem (0,0, mediumItem)
mediumFreezeItem=QTableWidgetItem()
mediumFreezeItem.data(Qt.CheckStateRole)
mediumFreezeItem.setCheckState(Qt.Unchecked)
self.cellTypeTable.setItem (0,1, mediumFreezeItem)
baseSize=self.cellTypeTable.baseSize()
self.cellTypeTable.setColumnWidth (0,baseSize.width()/2)
self.cellTypeTable.setColumnWidth (1,baseSize.width()/2)
self.cellTypeTable.horizontalHeader().setStretchLastSection(True)
| maciekswat/Twedit | Plugins/CC3DMLHelper/celltypedlg.py | Python | gpl-3.0 | 4,555 |
from PySide import QtGui, QtCore
from port import Port
from wire import Wire
class Node(QtGui.QGraphicsItem):
    """Movable, selectable graphics item representing a device with Ports.

    In-ports stack on the left edge, out-ports on the right; the node's
    height grows with the larger of the two port counts.
    """

    NodeTopPadding = 40     # vertical space reserved for the title text
    NodeBottomPadding = 20  # space below the last row of ports

    def __init__(self, name, deviceClass):
        super(Node, self).__init__()
        self.name = name
        self.deviceClass = deviceClass
        self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)
        self.setFlag(QtGui.QGraphicsItem.ItemSendsScenePositionChanges)
        self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable)
        self.ports = []
        self.brushes = {}
        self.brushes['background'] = QtGui.QBrush(QtGui.QColor(107, 107, 107),
                                                  QtCore.Qt.SolidPattern)
        self.colors = {}
        self.colors['normalBorder'] = QtGui.QColor(0, 0, 0)
        self.colors['selectedBorder'] = QtGui.QColor(200, 120, 10)
        self.sizeBackground()
        self.device = None

    def itemChange(self, change, value):
        # Keep attached wires' cached geometry in sync while the node moves.
        if change == QtGui.QGraphicsItem.ItemPositionChange and self.scene():
            for wire in self.wires():
                wire.prepareGeometryChange()
            self.scene().update()
        return QtGui.QGraphicsItem.itemChange(self, change, value)

    def setDevice(self, device):
        """Attach a device and create one Port per entry in device.ports."""
        self.device = device
        for port in device.ports:
            self.createPort(port['name'], port['media_type'], port['direction'])

    def createPort(self, name, media_type, direction='in'):
        """Add a new Port as a child item and re-layout the node."""
        port = Port(name, media_type, direction)
        self.ports.append(port)
        self.prepareGeometryChange()
        port.setParentItem(self)
        self.arrangePorts()
        self.sizeBackground()

    def inPorts(self):
        return [p for p in self.ports if p.direction == 'in']

    def outPorts(self):
        return [p for p in self.ports if p.direction == 'out']

    def arrangePorts(self):
        # In-ports on the left edge (x=0), out-ports on the right (x=150).
        for i, port in enumerate(self.inPorts()):
            port.setPos(0, Port.PortHeight * i + self.NodeTopPadding)
        for i, port in enumerate(self.outPorts()):
            port.setPos(150, Port.PortHeight * i + self.NodeTopPadding)

    def sizeBackground(self):
        """Recompute backgroundHeight from the taller of the port columns."""
        numPortsTall = max(len(self.inPorts()), len(self.outPorts()))
        # max(..., 0) guards the port-less case: the original formula used
        # (numPortsTall - 1) * PortHeight, which went negative and shrank an
        # empty node below its top+bottom padding.
        self.backgroundHeight = (self.NodeTopPadding +
                                 self.NodeBottomPadding +
                                 max(numPortsTall - 1, 0) * Port.PortHeight
                                 )

    def boundingRect(self):
        penWidth = 1.0
        return QtCore.QRectF(-penWidth / 2, -penWidth / 2,
                             150 + penWidth, self.backgroundHeight)

    def paint(self, painter, option, widget):
        painter.setBrush(self.brushes['background'])
        # Only the pen colour differs between the selected and normal states;
        # the duplicated drawRoundedRect calls were merged.
        if self.isSelected():
            painter.setPen(self.colors['selectedBorder'])
        else:
            painter.setPen(self.colors['normalBorder'])
        painter.drawRoundedRect(0, 0, 150, self.backgroundHeight, 5, 5)
        painter.setPen(self.colors['normalBorder'])
        painter.drawText(QtCore.QRectF(0, 0, 150, 20), QtCore.Qt.AlignCenter, self.name)

    def delete(self):
        """Remove this node and every wire attached to any of its ports."""
        for wire in self.wires():
            wire.delete()
        self.scene().notifyView('remove', self)
        self.scene().removeItem(self)

    def wires(self):
        # isinstance (rather than type(...) ==) so Wire subclasses count too.
        return [item for item in self.scene().items()
                if isinstance(item, Wire)
                and (self.hasPort(item.port1) or self.hasPort(item.port2))]

    def hasPort(self, port):
        """Return True when *port* is one of this node's child items."""
        return port in self.childItems()

    def mouseDoubleClickEvent(self, event):
        self.scene().notifyView('show', self)
| emergent-interfaces/open-playout | src/graph/node.py | Python | gpl-3.0 | 3,765 |
def preplot(result, options):
    """Prepare x positions and a panel title before plotting.

    Side effect: result['samples'] is converted to a one-based numpy array.
    Returns (x, title) where x = 1..35 and title encodes the sample count
    as a figure-panel letter ((a)..(e) for known counts, (f) otherwise).
    """
    xs = np.arange(1, 36, 1)
    n_samples = len(result['samples'])
    result['samples'] = np.array(result['samples']) + 1
    panel_letter = {1: "(a) ", 2: "(b) ", 10: "(c) ", 50: "(d) ", 100: "(e) "}
    title = panel_letter.get(n_samples, "(f) ") + str(n_samples) + " Samples"
    return xs, title
def postplot(ax, p, result, options):
    """Style the example figure's six axes after plotting.

    ax and p are six-element sequences of axes and plot handles, unpacked
    in the order [11, 12, 21, 22, 31, 32]; `result` and `options` are
    accepted for interface symmetry with preplot and are not used here.
    """
    # The original body began with an unused `import re`; removed.
    font = {'family': 'serif',
            'weight': 'normal',
            'size': 16}
    [ax11, ax12, ax21, ax22, ax31, ax32] = ax
    [p11, p12, p21, p22, p31, p32] = p
    # adjust style
    ax11.lines[2].set_linewidth(2.5)
    ax12.lines[0].set_linewidth(0.0)
    ax12.lines[0].set_linestyle('--')
    ax12.lines[0].set_marker('_')
    ax12.lines[0].set_markersize(12)
    ax12.lines[0].set_mew(2.5)
    ax11.set_xlim(1, 35)
    ax11.set_xlabel(r'$x$', font)
    ax11.set_ylabel(r'$p_{x,s}$', font)
    ax11.set_ylim(0, 1)
    ax11.legend([p11, p12], [r'$\mathbb{E}( p_{x,s} \mid {\bf Y}^\mathcal{X}_{{\bf n}_\mathcal{X}})$', 'Ground truth'], ncol=2, mode='expand', loc=3, frameon=False, borderaxespad=0., prop=font)
    ax12.set_ylabel('Ground truth', font)
    ax21.set_xlim(1, 35)
    ax21.set_ylim(0, 30)
    ax21.set_ylabel('Counts', font)
    ax21.set_xlabel(r'$x$', font)
    ax21.legend([p22], ['$U(x)$'], loc=4, prop=font)
    ax22.set_ylabel(r'$U(x)$', font)
    # the third row of axes is optional — label it only when present
    if ax31:
        ax31.set_ylabel(r'$P(M = m \mid {\bf Y}^\mathcal{X}_{{\bf n}_\mathcal{X}})$', font)
        ax31.set_xlabel(r'$m$', font)
    ax32.set_axis_off()
| pbenner/adaptive-sampling | doc/hmm/example/example1-visualization.py | Python | gpl-2.0 | 1,800 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.utils import timezone
from django.core.management.base import BaseCommand
from geonode.layers.utils import upload
from geonode.people.utils import get_valid_user
import traceback
import datetime
class Command(BaseCommand):
    """Management command that imports spatial data files into GeoNode."""

    help = ("Brings a data file or a directory full of data files into a"
            " GeoNode site. Layers are added to the Django database, the"
            " GeoServer configuration, and the pycsw metadata index.")

    def add_arguments(self, parser):
        """Register positional paths and the optional import settings."""
        # Positional arguments
        parser.add_argument('path', nargs='*', help='path [path...]')
        # Named (optional) arguments
        parser.add_argument(
            '-u',
            '--user',
            dest="user",
            default=None,
            help="Name of the user account which should own the imported layers")
        parser.add_argument(
            '-i',
            '--ignore-errors',
            action='store_true',
            dest='ignore_errors',
            default=False,
            help='Stop after any errors are encountered.')
        parser.add_argument(
            '-o',
            '--overwrite',
            dest='overwrite',
            default=False,
            action="store_true",
            help="Overwrite existing layers if discovered (defaults False)")
        parser.add_argument(
            '-k',
            '--keywords',
            dest='keywords',
            default="",
            help=("The default keywords, separated by comma, for the imported"
                  " layer(s). Will be the same for all imported layers"
                  " if multiple imports are done in one command"))
        parser.add_argument(
            '-l',
            '--license',
            dest='license',
            default=None,
            help=("The license for the imported layer(s). Will be the same for"
                  " all imported layers if multiple imports are done"
                  " in one command"))
        parser.add_argument(
            '-c',
            '--category',
            dest='category',
            default=None,
            help=("The category for the imported layer(s). Will be the same"
                  " for all imported layers if multiple imports are done"
                  " in one command"))
        parser.add_argument(
            '-r',
            '--regions',
            dest='regions',
            default="",
            help=("The default regions, separated by comma, for the imported"
                  " layer(s). Will be the same for all imported layers if"
                  " multiple imports are done in one command"))
        parser.add_argument(
            '-n',
            '--name',
            dest='layername',
            default=None,
            help="The name for the imported layer(s). Can not be used with multiple imports")
        parser.add_argument(
            '-t',
            '--title',
            dest='title',
            default=None,
            help=("The title for the imported layer(s). Will be the same for"
                  " all imported layers if multiple imports are done"
                  " in one command"))
        parser.add_argument(
            '-a',
            '--abstract',
            dest='abstract',
            default=None,
            help=("The abstract for the imported layer(s). Will be the same for"
                  "all imported layers if multiple imports are done"
                  "in one command"))
        parser.add_argument(
            '-d',
            '--date',
            dest='date',
            default=None,
            help=('The date and time for the imported layer(s). Will be the '
                  'same for all imported layers if multiple imports are done '
                  'in one command. Use quotes to specify both the date and '
                  'time in the format \'YYYY-MM-DD HH:MM:SS\'.'))
        parser.add_argument(
            '-p',
            '--private',
            dest='private',
            default=False,
            action="store_true",
            help="Make layer viewable only to owner")
        parser.add_argument(
            '-m',
            '--metadata_uploaded_preserve',
            dest='metadata_uploaded_preserve',
            default=False,
            action="store_true",
            help="Force metadata XML to be preserved")
        parser.add_argument(
            '-C',
            '--charset',
            dest='charset',
            default='UTF-8',
            help=("Specify the charset of the data"))

    @staticmethod
    def _split_csv(value):
        """Split a comma-separated option into a list of stripped tokens.

        An empty string yields an empty list (matching the original
        duplicated keywords/regions parsing exactly).
        """
        parts = value.split(',')
        if len(parts) == 1 and parts[0] == '':
            return []
        return [part.strip() for part in parts]

    def handle(self, *args, **options):
        """Import each path in options['path'] and print a run summary."""
        verbosity = int(options.get('verbosity'))
        # ignore_errors = options.get('ignore_errors')
        username = options.get('user')
        user = get_valid_user(username)
        overwrite = options.get('overwrite')
        name = options.get('layername', None)
        title = options.get('title', None)
        abstract = options.get('abstract', None)
        date = options.get('date', None)
        license = options.get('license', None)
        category = options.get('category', None)
        private = options.get('private', False)
        metadata_uploaded_preserve = options.get('metadata_uploaded_preserve',
                                                 False)
        charset = options.get('charset', 'UTF-8')
        console = self.stdout if verbosity > 0 else None
        # Overwriting and skipping existing layers are mutually exclusive.
        skip = not overwrite
        keywords = self._split_csv(options.get('keywords'))
        regions = self._split_csv(options.get('regions'))
        start = datetime.datetime.now(timezone.get_current_timezone())
        output = []
        for path in options['path']:
            out = upload(
                path,
                user=user,
                overwrite=overwrite,
                skip=skip,
                name=name,
                title=title,
                abstract=abstract,
                date=date,
                keywords=keywords,
                verbosity=verbosity,
                console=console,
                license=license,
                category=category,
                regions=regions,
                private=private,
                metadata_uploaded_preserve=metadata_uploaded_preserve,
                charset=charset)
            output.extend(out)
        updated = [dict_['file']
                   for dict_ in output if dict_['status'] == 'updated']
        created = [dict_['file']
                   for dict_ in output if dict_['status'] == 'created']
        skipped = [dict_['file']
                   for dict_ in output if dict_['status'] == 'skipped']
        failed = [dict_['file']
                  for dict_ in output if dict_['status'] == 'failed']
        finish = datetime.datetime.now(timezone.get_current_timezone())
        # total_seconds() replaces the hand-rolled microseconds/seconds/days sum.
        duration = (finish - start).total_seconds()
        duration_rounded = round(duration, 2)
        if verbosity > 1:
            print("\nDetailed report of failures:")
            for dict_ in output:
                if dict_['status'] == 'failed':
                    print("\n\n", dict_['file'], "\n================")
                    traceback.print_exception(dict_['exception_type'],
                                              dict_['error'],
                                              dict_['traceback'])
        if verbosity > 0:
            print("\n\nFinished processing {} layers in {} seconds.\n".format(
                len(output), duration_rounded))
            print("{} Created layers".format(len(created)))
            print("{} Updated layers".format(len(updated)))
            print("{} Skipped layers".format(len(skipped)))
            print("{} Failed layers".format(len(failed)))
            if len(output) > 0:
                print("{} seconds per layer".format(duration * 1.0 / len(output)))
| tomkralidis/geonode | geonode/layers/management/commands/importlayers.py | Python | gpl-3.0 | 9,091 |
import logging
import httplib2
import os
"""
Application and User specific settings most of which are required
for full script functionality.
Each setting is accompanied by detailed comments outlining how to
acquire the value needed along with the format the script requires.
This document may be replaced in the near future with an interactive
script and validator in an effort to greatly reduce the potential errors
due to improper or invalid settings.
"""
__author__ = 'siorai@gmail.com (Paul Waldorf)'
######################################################
#[General Information and initial setup requirements]#
######################################################
#
# Please note that, as of 1/7/2017, the following detailed description
# is accurate for creating a Project and gaining OAuth2 credentials
# from the developers console via Google. If this process changes, I'll
# do my best to make sure updated information is available in this package.
#
# Setting up this script requires a bit of work to setup initially fair warning
# First, you'll need to create your own project at Google at
#
# https://console.developers.google.com
#
# From here, you'll select 'Project' and select create new. Give it a name.
# Next on the left, you'll need to create some credentials by selecting
# 'Credentials' on the side bar.
#
# Go over to the OAuth consent screen and select a Product Name, which is
# what you'll see when you authorize this project's ability to access your
# Google Account.
#
# Then you'll select Credentials.
#
# Here you have two options, an OAuth Client ID, or a Service Account.
#
# OAuth Client IDs are the typical choice for most applications that access
# Google services. Typically, applications developed against any of Google's
# APIs are accessing different users' data.
#
# You can find more about about the OAuth2 standard that google uses over at
#
# http://developers.google.com/identity/protocols/OAuth2
#
# Service Accounts eliminate the need for any sort of interaction on the
# user past initial setup which is why it's ideal in this situation.
# However it requires you to have administrative access to your own G-Suite.
# See details below.
#
# =OAuth2 Client ID Setup=
#
# Select Create Credentials then OAuth2 Client ID. When asked for
# the Application type, select 'other', give it a name, and hit create.
# It'll show your client ID and client secret on screen, don't worry
# and hit OK. From there you'll see a table with an entry. On the far right
# of that you'll see a download arrow, this is the authentication for
# your application that identifies the script as being associated with this
# project. Inside this JSON you'll have half of the puzzle to make calls
# to the API. Rename this file to something more manageable and make
# sure to put it in a location that both your normal user, and the user
# your transmission-daemon runs under can access. (To check to make sure
# it can access at it, run a 'cat' command against it with a -u (transmission
# user name) argument on it, the same goes for the rest of the script.
# Keep this file in a secure location, then go down to
#
# oauth2keyfile = "/path/to/keyfile"
#
# and change it to the path where it can find it.
#
# oauth2keyfile = "/home/user/json/client_secret.json"
#
# =Service Account Setup=
#
# Select create credentials then hit Service Account. Under the Service
# Account drop box, select New Service Account. Make up a name for the
# account, under 'Role', select Project, then Owner. Make sure JSON is ticked
# and not P12. Then create. This will create a copy and then start the
# download of the only JSON keyfile that will ever be created for this
# service account. If something happens to it, you can't request another
# copy. (You can just create a new service account using the same process
# though).
#
# Back on the Credentials page, select 'Manage service accounts' just over the
# list of Service Account Keys. Here it should show 2 accounts by default,
# a Compute Engine default service account, and then the one you just created.
# all the way to the left, you'll see 3 option dots, select that, then check the
# Enable G Suite Domain-wide Delegation box, then save. This will show a new
# option for View Client ID. Selecting that will bring you to a page with the
# client ID that was created for that service account. That ID is what you'll
# enter into the Administration Console of your G-Suite that authorizes access
# blanket total access to every user under that G-Suite. By utilizing this,
# you'll also have to select a user name and enter it in 'delegated_email'.
# Since Service Accounts aren't covered under any of the controls normally
# associated with all accounts on the G-Suite, and thus don't have their own
# assigned spaces for things like Google Drive and Gmail, the 'delegated_email'
# is the user that the service account will access. See
#
# https://developers.google.com/identity/protocols/OAuth2ServiceAccount?hl=en_US#delegatingauthority
#
# for further information.
#
# And fill out the fields under the Service Account Section below.
#
# =API Activation=
#
# This script currently uses 3 APIs that you'll need to enable on the project
# you've just created. Selecting 'Library' on the left sidebar should load a
# list of APIs. The three APIs you'll need to enable are the Drive API, the
# Gmail API, and the URL Shortener API. Select Each one, then up above the
# description it'll show a button for 'Enable'. Select it. Then go back to
# the library and enable the other two APIs.
#########################
#[Transmission settings]#
#########################
torrentFileDirectory = "/path/to/torrent/directory/"
# Local torrent directory where .torrent files are kept.
# Used to fetch information for parsing tracker details
# in order to sort.
###########################
#[Remote sorting settings]#
###########################
# Dictionary used for sorting.
#
# For each category listed, you'll need to have a Folder ID
# that can be obtained from the address bar when you visit that directory
# remotely on drive.google.com.
#
# For every category there is also a set of rules in place.
# Currently these are my working settings and they seem to be working pretty
# well for my usage. You can get more details in the Rules.py script.
#
# Each category also has ['Matches']['Match_Tracker']; this will need to
# be supplied with the relevant trackers for each associated category.
#
# If you'd rather not mess with the sorting at all and/or run into issues,
# keep this setting as False and the sorting portion of the script won't
# run at all.
SortTorrents = False
categoriesDictSettings = {
    'Music': {
        'Folder_ID': ['somefolder'],  # folder_ID for Music here
        'Matches': {
            'Match_Tracker': [  # list of Music trackers
                'http://tracker.example1.com/announce',
                'http://tracker.example2.com/announce',
                'http://tracker.example3.com/announce',
            ],
            'Match_Content_Extention': [  # list of file extensions
                '*.aac',
                '*.flac',
                '*.mp3'
            ]
        }
    },
    'TV': {
        'Folder_ID': ['somefolder'],  # folder_ID for TV here
        'Matches': {
            'Match_Tracker': [  # list of TV trackers
                'http://tracker.example1.com/announce',
                'http://tracker.example2.com/announce',
                'http://tracker.example3.com/announce',
            ],
            'Match_Expression': [  # list of TV expressions
                '*.S??E??.*',
                '*.s??e??.*'
            ]
        }
    },
    'Movies': {
        'Folder_ID': ['somefolder'],  # folder_ID for Movies here
        'Matches': {
            'Match_Tracker': [  # list of Movies trackers
                'http://tracker.example1.com/announce',
                'http://tracker.example2.com/announce',
                'http://tracker.example3.com/announce',
            ],
        }
    },
    'XXX': {
        'Folder_ID': ['somefolder'],  # folder_ID for XXX here
        'Matches': {
            'Match_Tracker': [  # list of XXX trackers
                'http://tracker.example1.com/announce',
                'http://tracker.example2.com/announce',
                'http://tracker.example3.com/announce',
            ],
        }
    }
}
###################
#[Shared settings]#
###################
# This script currently supports two flows: Service Account authentication
# and Oauth2JSONFlow.
flow_to_use = "Oauth2JSONFlow"
# choices are:
# "ServiceAccountFlow" ( Implemented )
# "Oauth2JSONFlow" ( Implemented )
# "Oauth2WebFlow" ( Not Implemented )
scopes = [
    'https://www.googleapis.com/auth/drive',
    'https://www.googleapis.com/auth/gmail.compose',
    'https://www.googleapis.com/auth/urlshortener'
]
# NOTE: redirect_uri is re-assigned near the bottom of this file, so the
# out-of-band value defined there is the one actually in effect at import
# time; this web value is effectively unused.
redirect_uri = "http://example.com/auth_return"
######################################
#[Default Google Drive Upload Folder]#
######################################
# This is the ID of the folder that all uploads will be uploaded to.
# You need to obtain it by visiting drive.google.com, entering/creating
# a folder, and taking the 28-character string at the end of the URL.
#
# for example: https://drive.google.com/drive/u/3/folders/60B4jmMf2bD9-bm9XSkw2Z2w0d2M
#
# Would be:
googledrivedir = ['60B4jmMf2bD9-bm9XSkw2Z2w0d2M']
####################
#[Logging settings]#
####################
# Location of logfile.
logfile = "./upload.log"
# Logging defaults to DEBUG for now. Fair warning: this -will- create a rather
# large log. Uploading folders of ~2 GB has produced log files of over 1 MB.
# Be advised, and feel free to adjust accordingly.
loglevel = logging.DEBUG
# NOTE: logging.basicConfig() configures the root logger and returns None,
# so loggingSetup is always None; the assignment is kept only for its
# import-time side effect of configuring logging.
loggingSetup = logging.basicConfig(filename=logfile, level=loglevel, format='%(asctime)s %(message)s')
# Setting to include the most possible information from each HTTP request.
# Defaulted to 4 for troubleshooting.
httplib2.debuglevel = 4
################################
#[Google Drive Upload Settings]#
################################
# This setting allows commandline usage directed at single files to be
# uploaded to a Google Drive pastebin-like folder for rapid transfer.
useSpecialforSingles = True
# Setting for the pasting-bin folder ID.
pastingbin = ['60B4jmMf2bD9-bm9XSkw2Z2w0d2M']
# When set to True, modifies the permissions of any file/folder created by this
# script to be viewable by anyone. Change to False to keep the Drive default.
nonDefaultPermissions = True
permissionValue = 'anyone'
permissionType = 'anyone'
permissionRole = 'reader'
chunksize = 50000*1024  # Uploading chunk size, in bytes.
##################
#[Email settings]#
##################
# The script will parse the JSON response from the server after each request
# in order to fill out the data in the table for the email. Visit:
#
# https://developers.google.com/drive/v2/reference/files#resource
#
# for a full list of supported properties.
#
# Note that 'alt_tiny' is a custom created value that takes the
# 'alternateLink' property and sends it to the URL Shortener API to trim
# those 50-80 character long links to a more manageable size.
emailparameters = ['title', 'md5Checksum', 'id', 'alt_tiny', 'fileSize']
# Email parameters for the table that gets emailed. Replace both with the
# email associated with the Google account you're using, to send it to yourself.
emailSender = "someuser@gmail.com"
emailTo = "someuser@gmail.com"
# When implemented, setting this value to True will delete the temporary
# HTML file the script creates for the table.
deleteTmpHTML = False
# Temporary file name. os.getpid() (mostly) ensures that tmp files will be
# unique and not overwritten.
tempfilename = './temp.%s.html' % os.getpid()
###############################
#[Service Account Credentials]#
###############################
servicekeyfile = "/path/to/servicekey.json"
client_email = "someprojectname@blahblah.gserviceaccount.com"
delegated_email = "userdata@access.com"
############################
#[Normal JSON Oauth2 Creds]#
############################
# pickledcredsFile is the location where the authorized credentials instance
# will be stored for later use.
pickledcredsFile = "./user.creds"
oauth2keyfile = "/home/someone/client_secret.json"
# NOTE: this overrides the redirect_uri defined earlier in this file; the
# out-of-band (OOB) value below is the one actually in effect.
redirect_uri = 'urn:ietf:wg:oauth:2.0:oob'
#################################
#[Normal WebServer Oauth2 Creds]#
#################################
# not implemented yet
oauth2web_id = "randomintblahblah.apps.googleusercontent.com"
oauth2web_secret = "oauth2web_secret"
| siorai/AutoUploaderGoogleDrive | AutoUploaderGoogleDrive/settings.py | Python | gpl-3.0 | 12,802 |
from database_testing import DatabaseTest
from database import db
import models
class StatisticsTest(DatabaseTest):
    """Exercises the Statistics aggregation layer against a scratch database."""

    def exposed_stats(self):
        """Build a Statistics instance, run every counter, return the dict."""
        from stats import Statistics
        statistics = Statistics()
        statistics.calc_all()
        return statistics.get_all()

    def test_simple_models(self):
        # One entry per simple per-model counter exposed by Statistics.
        counted_models = {
            'pathways': models.Pathway,
            'proteins': models.Protein,
            'genes': models.Gene,
            'kinases': models.Kinase,
            'kinase_groups': models.KinaseGroup,
            'sites': models.Site,
            'cancer': models.Cancer,
        }
        for model in counted_models.values():
            db.session.add_all([model() for _ in range(10)])
        stats = self.exposed_stats()
        for stat_name in counted_models:
            assert stats[stat_name] == 10

    def test_mutations_count(self):
        metadata_models = {
            model.name: model
            for model in (
                models.MIMPMutation,
                models.The1000GenomesMutation,
                models.MC3Mutation,
                models.ExomeSequencingMutation,
                models.InheritedMutation,
            )
        }
        # One mutation annotated by each metadata source.
        for metadata_model in metadata_models.values():
            db.session.add(metadata_model(mutation=models.Mutation()))
        stats = self.exposed_stats()

        def to_stat_name(model_name):
            # Statistics keys use 'T' where the source name has '1'.
            return model_name.replace('1', 'T')

        for source_name in metadata_models:
            assert stats['muts'][to_stat_name(source_name)] == 1
        assert stats['muts']['all'] == len(metadata_models)
        # confirmed mutations are all sources except MIMP
        assert stats['muts']['all_confirmed'] == len(metadata_models) - 1

    def test_from_many_sources(self):
        # create one mutation which is present in multiple sources
        shared_mutation = models.Mutation()
        db.session.add_all([
            models.InheritedMutation(mutation=shared_mutation),
            models.MC3Mutation(mutation=shared_mutation),
        ])
        from stats import Statistics
        statistics = Statistics()
        assert statistics.from_more_than_one_source() == 1

    def test_interactions(self):
        from models import Protein, Site, Kinase, KinaseGroup
        first_protein = Protein(
            sites=[
                Site(),
                Site(kinases=[Kinase()], kinase_groups=[KinaseGroup()])
            ]
        )
        db.session.add(first_protein)
        second_protein = Protein(
            sites=[Site(kinases=[Kinase()])]
        )
        db.session.add(second_protein)
        # Recompute the expected values independently from the database.
        expected_interactions = 0
        expected_kinases = set()
        expected_groups = set()
        expected_proteins = set()
        for protein in models.Protein.query.all():
            for site in protein.sites:
                expected_interactions += len(site.kinases) + len(site.kinase_groups)
                expected_kinases.update(site.kinases)
                expected_groups.update(site.kinase_groups)
                if site.kinases or site.kinase_groups:
                    expected_proteins.add(protein)
        from stats import Statistics
        statistics = Statistics()
        assert statistics.interactions() == expected_interactions
        assert statistics.kinases_covered() == len(expected_kinases)
        assert statistics.kinase_groups_covered() == len(expected_groups)
        assert statistics.proteins_covered() == len(expected_proteins)

    def test_table_generation(self):
        from stats import generate_source_specific_summary_table
        assert generate_source_specific_summary_table()
| reimandlab/Visualisation-Framework-for-Genome-Mutations | website/tests/test_statistics.py | Python | lgpl-2.1 | 4,004 |
from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
    """Nested phone-number sub-form, embedded into ExampleForm via FormField."""
    country_code = IntegerField('Country Code', [validators.required()])
    area_code = IntegerField('Area Code/Exchange', [validators.required()])
    number = TextField('Number')
class ExampleForm(Form):
    """Kitchen-sink demo form exercising the common WTForms field types
    (text, hidden, recaptcha, radio, checkbox, sub-forms, file, submit).

    NOTE: TextField / Required are legacy wtforms aliases (StringField /
    DataRequired in modern wtforms) -- presumably kept for the wtforms
    version this sample targets.
    """
    field1 = TextField('First Field', description='This is field one.')
    field2 = TextField('Second Field', description='This is field two.',
                       validators=[Required()])
    hidden_field = HiddenField('You cannot see this', description='Nope')
    recaptcha = RecaptchaField('A sample recaptcha field')
    radio_field = RadioField('This is a radio field', choices=[
        ('head_radio', 'Head radio'),
        ('radio_76fm', "Radio '76 FM"),
        ('lips_106', 'Lips 106'),
        ('wctr', 'WCTR'),
    ])
    checkbox_field = BooleanField('This is a checkbox',
                                  description='Checkboxes can be tricky.')
    # subforms
    mobile_phone = FormField(TelephoneForm)
    # you can change the label as well
    office_phone = FormField(TelephoneForm, label='Your office phone')
    ff = FileField('Sample upload')
    submit_button = SubmitField('Submit Form')
    def validate_hidden_field(form, field):
        """Inline validator (validate_<fieldname> convention); always fails
        so the demo page always shows a server-side validation error."""
        raise ValidationError('Always wrong')
def create_app(configfile=None):
    """Build and configure the sample application.

    Flask-Appconfig is not necessary, but highly recommended:
    https://github.com/mbr/flask-appconfig
    """
    app = Flask(__name__)
    AppConfig(app, configfile)
    Material_Lite(app)

    # In a real app, these should be configured through Flask-Appconfig.
    app.config.update(
        SECRET_KEY='devkey',
        RECAPTCHA_PUBLIC_KEY='6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw',
    )

    @app.route('/', methods=('GET', 'POST'))
    def index():
        form = ExampleForm()
        # Run validation so error messages reach the browser.
        form.validate_on_submit()
        # Emit one flash message per known category, plus one without.
        for text, category in (
            ('critical message', 'critical'),
            ('error message', 'error'),
            ('warning message', 'warning'),
            ('info message', 'info'),
            ('debug message', 'debug'),
            ('different message', 'different'),
        ):
            flash(text, category)
        flash('uncategorized message')
        return render_template('index.html', form=form)

    return app
if __name__ == '__main__':
    # Run the development server when this module is executed directly.
    create_app().run(debug=True)
| HellerCommaA/flask-material-lite | sample_application/__init__.py | Python | mit | 2,763 |
"""Worked examples of pandas hierarchical (MultiIndex) indexing.

Cleaned up from an interactive session: expressions that were syntactically
invalid or raised exceptions are kept below only as commented-out notes, so
the module imports and runs cleanly.
"""
import numpy as np
import pandas as pd

# A Series with a two-level MultiIndex: outer letters, inner integers.
data = pd.Series(np.random.randn(9),
                 index=[['a', 'a', 'a', 'b', 'b', 'c', 'c', 'd', 'd'],
                        [1, 2, 3, 1, 3, 1, 2, 2, 3]])
data.index                 # MultiIndex with two levels
data['b']                  # partial indexing on the outer level
data['b':'c']              # slicing on the (sorted) outer level
data.loc[['b', 'd']]       # several outer labels at once
data.loc[:, 2]             # select by the inner level
data.unstack()             # pivot the inner level into columns
data.unstack().stack()     # ...and back to a Series

# A DataFrame with MultiIndex rows *and* columns.
frame = pd.DataFrame(np.arange(12).reshape(4, 3),
                     index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]],
                     columns=[['Ohio', 'Ohio', 'Colorado'],
                              ['Green', 'Red', 'Green']])
frame.index.names = ['key1', 'key2']
frame.columns.names = ['state', 'color']

frame['Ohio']                  # select by the outer column level
frame.loc['a']                 # select by the outer row level
frame.loc['a', 'Ohio']         # rows and columns at once
frame.loc[['a', 'b'], 'Ohio']
frame.loc[:, 'Ohio']['Green']  # inner column level via chained selection

# The following experiments from the original transcript fail and are kept
# only for reference:
#   frame[:, 'a']               # TypeError: [] does not take row selectors
#   frame['a'] / frame[['a']]   # KeyError: 'a' is a row label, not a column
#   frame[['Green']]            # KeyError: 'Green' is not a top-level column
#   frame.loc[:, 'Green']       # KeyError: .loc matches the top level first
#   frame[[:, 'Green']]         # SyntaxError: bare slice inside a list
| eroicaleo/LearningPython | PythonForDA/ch08/hier_index.py | Python | mit | 1,173 |
#!/usr/bin/env python3
# Copyright (C) 2017 Christian Thomas Jacobs.
# This file is part of PyQSO.
# PyQSO is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyQSO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyQSO. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
import unittest
from pyqso.compare import *
class TestCompare(unittest.TestCase):

    """ The unit tests for the comparison schemes. """

    def setUp(self):
        """ Build a Gtk.ListStore with four sample rows to compare. """
        self.model = Gtk.ListStore(*([int] + [str]*3))
        # Columns: index, numeric string, date (yyyymmdd), time (HHMM).
        for row in ([0, "100", "20150323", "1433"],
                    [1, "5000", "20160423", "1432"],
                    [2, "5000", "20160423", "1433"],
                    [3, "25", "20160423", "1433"]):
            self.model.append(row)
        return

    def _row_iters(self):
        """ Return the iterators for the four rows, in order. """
        iters = [self.model.get_iter(Gtk.TreePath(0))]
        for _ in range(3):
            iters.append(self.model.iter_next(iters[-1]))
        return iters

    def test_compare_default(self):
        """ Check the correctness of the default comparison scheme. """
        iter1, iter2, iter3, iter4 = self._row_iters()
        # Compare values in the second column.
        column_index = 1
        assert compare_default(self.model, iter1, iter2, column_index) == -1
        assert compare_default(self.model, iter2, iter3, column_index) == 0
        assert compare_default(self.model, iter3, iter4, column_index) == 1

    def test_compare_date_and_time(self):
        """ Check that dates in yyyymmdd format are compared correctly. """
        iter1, iter2, iter3, iter4 = self._row_iters()
        # Compare the date column, falling back to the time column on ties.
        columns = [2, 3]
        assert compare_date_and_time(self.model, iter1, iter2, columns) == -1
        assert compare_date_and_time(self.model, iter2, iter3, columns) == -1
        assert compare_date_and_time(self.model, iter3, iter4, columns) == 0
        assert compare_date_and_time(self.model, iter4, iter1, columns) == 1
if(__name__ == '__main__'):
    # Allow running this test module directly.
    unittest.main()
| ctjacobs/pyqso | tests/test_compare.py | Python | gpl-3.0 | 3,153 |
# -*- coding: utf-8 -*-
#
# Moonstone is platform for processing of medical images (DICOM).
# Copyright (C) 2009-2011 by Neppo Tecnologia da Informação LTDA
# and Aevum Softwares LTDA
#
# This file is part of Moonstone.
#
# Moonstone is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import vtk
from ..base import PluginBase
from gui.qt.resliceaction import ResliceAction
class ReslicePlugin(PluginBase):
    """ILSA plugin exposing the reslice tool.

    A thin wrapper: it registers itself with the ILSA plugin manager and
    delegates all real work to the Qt ResliceAction object.
    """
    def __init__(self, ilsa):
        logging.debug("In ReslicePlugin::__init__()")
        self._name = None
        self._action = ResliceAction(ilsa)
        # Register this plugin instance with the ILSA manager.
        ilsa.add(self)
        self._ilsa = ilsa
    @property
    def ilsa(self):
        """The ILSA plugin manager this plugin is registered with."""
        logging.debug("In ReslicePlugin::ilsa()")
        return self._ilsa
    @property
    def action(self):
        """The ResliceAction that implements the plugin's behaviour."""
        logging.debug("In ReslicePlugin::action()")
        return self._action
    @property
    def name(self):
        """Display name of the plugin (None until set by the framework)."""
        logging.debug("In ReslicePlugin::name()")
        return self._name
    @name.setter
    def name(self, name):
        logging.debug("In ReslicePlugin::name.setter()")
        self._name = name
    def notify(self, vtkInteractorStyle=None):
        # Interactor-event hook required by PluginBase; intentionally a no-op.
        logging.debug("In ReslicePlugin::notify()")
    def save(self):
        # Persistence hook required by PluginBase; intentionally a no-op.
        logging.debug("In ReslicePlugin::save()")
    def restore(self):
        # Persistence hook required by PluginBase; intentionally a no-op.
        logging.debug("In ReslicePlugin::restore()")
    @property
    def description(self):
        """Human-readable description shown in the UI (placeholder)."""
        logging.debug("In ReslicePlugin::description()")
        return "..."
    @property
    def separator(self):
        """Whether a toolbar/menu separator should precede this plugin."""
        logging.debug("In ReslicePlugin::separator()")
        return False
    @property
    def status(self):
        """Whether the plugin is enabled."""
        logging.debug("In ReslicePlugin::status()")
        return True
| aevum/moonstone | src/moonstone/ilsa/plugins/reslice/reslice.py | Python | lgpl-3.0 | 2,331 |
# $Id$
import inc_sip as sip
import inc_sdp as sdp
# Deliberately malformed SDP body: the mandatory lines (v=, o=, s=, c=, t=)
# are present but every value is empty.
sdp = \
"""
v=
o=
s=
c=
t=
a=
"""
pjsua_args = "--null-audio --auto-answer 200"
extra_headers = ""
include = [ "Warning: " ] # better have Warning header
exclude = []
# Expect pjsua to reject the broken SDP with a 400 response.
sendto_cfg = sip.SendtoCfg("Bad SDP syntax", pjsua_args, sdp, 400,
                           extra_headers=extra_headers,
                           resp_inc=include, resp_exc=exclude)
| xiejianying/pjsip_trunk | tests/pjsua/scripts-sendto/155_err_sdp_bad_syntax.py | Python | gpl-2.0 | 367 |
# shieldRechargeRateAddPassive
#
# Used by:
# Subsystems from group: Defensive Systems (16 of 16)
type = "passive"


def handler(fit, module, context):
    """Apply the subsystem's flat shield recharge-rate bonus to the ship.

    A missing attribute (getModifiedItemAttr returning None) is treated
    as a bonus of zero.
    """
    bonus = module.getModifiedItemAttr("shieldRechargeRate") or 0
    fit.ship.increaseItemAttr("shieldRechargeRate", bonus)
| Ebag333/Pyfa | eos/effects/shieldrechargerateaddpassive.py | Python | gpl-3.0 | 259 |
#!/usr/bin/python
# coding=utf-8
#v2.0
import numpy as np
from sklearn.linear_model import LogisticRegression
import logging
logger = logging.getLogger("prob_model")
def lr(X, y):
    """Fit and return an L2-regularised logistic regression on (X, y).

    scikit-learn's fit() returns the estimator itself, so this is a
    single-expression equivalent of fit-then-return.
    """
    return LogisticRegression(penalty="l2").fit(X, y)
def convert_onehot(one_hot_matrix):
    """
    Convert a one-hot encoded matrix back to a vector of class indices.
    :param one_hot_matrix: iterable of one-hot rows (exactly one 1 per row)
    :return: list with the position of the 1 in each row
    """
    return [int(np.where(row == 1)[0]) for row in one_hot_matrix]
def combine_column(l):
    """
    Horizontally combine all the matrices in l (column-wise concatenation).
    The result dtype follows NumPy promotion rules (mixing int and str
    columns yields a string array), exactly as repeated np.append would.
    Ex.
    > a = np.array([[1,1],[2,2],[3,3],[4,4],[5,5]])
    > b = np.array([["a"],["a"],["a"],["a"],["a"]])
    > l = [a,b]
    > combine_column(l)
    > [['1' '1' 'a']
    >  ['2' '2' 'a']
    >  ['3' '3' 'a']
    >  ['4' '4' 'a']
    >  ['5' '5' 'a']]
    """
    if len(l) == 1:
        # Preserve the original behaviour of returning the lone matrix itself
        # (not a copy).
        return l[0]
    # One np.concatenate instead of repeated np.append: linear instead of
    # quadratic copying, same result (np.append wraps concatenate).
    return np.concatenate(l, axis=1)
def write_output_to_file(filename, data):
    """Write every item of data to filename, one item per line."""
    with open(filename, "w") as handle:
        handle.writelines("%s\n" % item for item in data)
def get_col(colname, matrix):
    """
    Return the columns of matrix whose header cell (row 0) equals colname,
    with the header row stripped, cast to float and squeezed to the lowest
    possible dimensionality.
    """
    selected = matrix[:, np.where(matrix[0,] == colname)]
    return selected[1:, :].astype(float).squeeze()
class ProbModel(object):
    """Probabilistic mutation model.

    Wraps an annotated mutation matrix (header row + one row per mutation)
    and fits logistic regressions predicting chromatin accessibility,
    transcribed-region membership and strand from the mutation-type and
    exposure features.  The final per-mutation probability is the product
    p_a * p_t * p_s * p_ce.  A value of -1 in a column means "unknown" and
    disables / masks the corresponding model.
    """
    def __init__(self, input_matrix):
        self._input_matrix = input_matrix # whole matrix
        self._mut_type = get_col("Mut_type", input_matrix) # mut type (one hot encoded)
        self._exposure = get_col("Exposure", input_matrix) # exposure vector
        self._mut = combine_column([self._mut_type, self._exposure]) # combine mut_type and exposure vector for later use
        self._transregion = get_col("Transcribed", input_matrix) # transcribed
        self._strand =get_col("Strand", input_matrix) #strand information
        self._chromatin = get_col("Chromatin", input_matrix) #chromatin accessibility
        self._p_ce = get_col("p_ce", input_matrix) # calculated p_ce vector
        self._vaf = get_col("VAF", input_matrix) # vaf: VAF to actural VAF * 2
        # combine mut and chromatin for later use
        self._tr_X = combine_column([self._mut, self._chromatin.reshape(self._chromatin.shape[0], 1)])
        # combine mut and strand for later use
        self._strand_X = combine_column([self._mut, self._transregion.reshape(self._transregion.shape[0], 1)])
    def _fit_chromatin(self):
        # Chromatin accessibility predicted from mutation-type + exposure.
        self._ch_lr = lr(self._mut, self._chromatin)
    def _fit_transregion(self):
        # When chromatin data is unavailable (-1 present) fall back to the
        # plain mutation features; otherwise include chromatin as a feature.
        if -1 in self._chromatin:
            self._trans_lr = lr(self._mut, self._transregion)
        else:
            self._trans_lr = lr(self._tr_X, self._transregion)
    def _fit_strand(self):
        """Fit the strand model on rows where strand is known (!= -1)."""
        # select rows not equal to -1
        # get index of row != -1 for features and labels
        idx = np.where(self._strand != -1)[0]
        strand_X = self._strand_X[idx]
        labels = self._strand[idx]
        # fit the model
        self._strand_lr = lr(strand_X, labels)
    def _fit(self):
        """Fit all sub-models (chromatin only when fully observed)."""
        if -1 not in self._chromatin:
            self._fit_chromatin()
        self._fit_strand()
        self._fit_transregion()
    def _predict_proba(self, mut, tr_X, strand_X, strand_label):
        """Return (p_a, p_t, p_s) probability matrices for the given features.

        Rows with unknown strand keep a neutral probability of 1.
        """
        if -1 in self._chromatin:
            # No chromatin model: neutral probability 1 for every row.
            p_a = np.asarray([1]*self._mut.shape[0]).reshape(self._mut.shape[0],1)
        else:
            p_a = self._ch_lr.predict_proba(mut)
        p_t = self._trans_lr.predict_proba(tr_X)
        idx = np.where(strand_label != -1)[0]
        p_s_predicted = self._strand_lr.predict_proba(strand_X[idx])
        #create empty array
        p_s = np.ones(shape=(strand_label.shape[0], 2), dtype=float)
        # insert predicted prob into p_s
        p_s[idx]=p_s_predicted.astype(float)
        return p_a, p_t, p_s
    def _calculate_proba(self,p_a, p_t, p_s):
        """Combine the factor probabilities into tab-separated output rows,
        headed by a column-title line."""
        mut_prob = []
        mutation_type = convert_onehot(self._mut_type)
        mut_prob.append("mut\tVAF\tprob\tp_ai\tp_si\tp_ti\tp_ce")
        for i in range(len(self._p_ce)):
            p_ti = p_t[i][int(self._transregion[i])]
            # NOTE(review): comparing a scalar to [-1] relies on numpy
            # broadcasting returning a size-1 truth array -- confirm the
            # elements of self._strand are numpy scalars here.
            if self._strand[i] == [-1]:
                p_si = 1
                #p_si = p_s[i][0]
            elif self._strand[i] == [0]:
                #print(p_s.shape)
                p_si = p_s[i][0]
            else:
                p_si = p_s[i][1]
            if -1 not in self._chromatin:
                p_ai = p_a[i][int(self._chromatin[i])]
            else: p_ai = 1
            mut_prob.append(str(mutation_type[i]) + "\t" + str(self._vaf[i]) + "\t" +
                            str(p_ai * p_ti * p_si * self._p_ce[i]) + "\t" +
                            str(p_ai) + "\t" + str(p_si) + "\t" +
                            str(p_ti) + "\t" + str(self._p_ce[i]))
        return mut_prob
#
# if __name__ == "__main__":
# # test file
# #
# self._filename = filename
#
# filecontent = np.load(self._filename)
#
# train = TrainFromMatrixFile(train_file)
# train._fit()
# p_a, p_t, p_s = train._predict_proba()
# write_output_to_file("./train.txt", train._calculate_proba(p_a, p_t, p_s))
#
# test = TrainFromMatrixFile(test_file)
# test_pa, test_pt, test_ps = train._predict_proba(test._mut, test._tr_X, test._strand_X)
# write_output_to_file("./test.txt", test._calculate_proba(test_pa, test_pt, test_ps))
#
# low_support = TrainFromMatrixFile(lowsup_file)
# low_support_pa, low_support_pt, low_support_ps = train._predict_proba(low_support._mut, low_support._tr_X, low_support._strand_X)
# write_output_to_file("./low_sup.txt", low_support._calculate_proba(low_support_pa, low_support_pt, low_support_ps))
# random = TrainFromMatrixFile(random_file)
# random_pa, random_pt, random_ps = train._predict_proba(random._mut, random._tr_X, random._strand_X)
# write_output_to_file("./random.txt", random._calculate_proba(random_pa, random_pt, random_ps))
| RyogaLi/prob_model | src/prob_model.py | Python | gpl-3.0 | 5,242 |
def speedify(s):
    """Spread the letters of s so each moves forward by its alphabet offset.

    Letter at position i lands at index i + (ord(letter) - ord('A')); gaps
    stay as spaces and trailing spaces are stripped.
    """
    # Worst case a letter moves 25 slots past the end of the input.
    buf = [' '] * (len(s) + 25)
    for pos, ch in enumerate(s):
        buf[pos + ord(ch) - ord('A')] = ch
    return ''.join(buf).rstrip()
| SelvorWhim/competitive | Codewars/TheSpeedOfLetters.py | Python | unlicense | 227 |
from __future__ import absolute_import
from errbot import BotPlugin, botcmd, Command, botmatch
def say_foo(plugin, msg, args):
    """Reply with 'foo' followed by the concrete type of the owning plugin."""
    return 'foo {0}'.format(type(plugin))
class Dyna(BotPlugin):
    """Just a test plugin to see if the dynamic plugin API works.
    """
    @botcmd
    def add_simple(self, _, _1):
        """Register a dynamic plugin 'simple' exposing two plain commands."""
        simple1 = Command(lambda plugin, msg, args: 'yep %s' % type(plugin), name='say_yep')
        # No explicit name: the command takes its name from the function.
        simple2 = Command(say_foo)
        self.create_dynamic_plugin('simple', (simple1, simple2), doc='documented')
        return 'added'
    @botcmd
    def remove_simple(self, msg, args):
        """Tear the 'simple' dynamic plugin back down."""
        self.destroy_dynamic_plugin('simple')
        return 'removed'
    @botcmd
    def add_re(self, _, _1):
        """Register a dynamic plugin with a regex-matching (botmatch) command."""
        re1 = Command(lambda plugin, msg, match: 'fffound',
                      name='ffound',
                      cmd_type=botmatch,
                      cmd_args=(r'^.*cheese.*$',))
        self.create_dynamic_plugin('re', (re1, ))
        return 'added'
    @botcmd
    def remove_re(self, msg, args):
        """Tear the 're' dynamic plugin back down."""
        self.destroy_dynamic_plugin('re')
        return 'removed'
    @botcmd
    def add_saw(self, _, _1):
        """Register a command that splits its arguments on commas."""
        re1 = Command(lambda plugin, msg, args: '+'.join(args),
                      name='splitme',
                      cmd_type=botcmd,
                      cmd_kwargs={'split_args_with': ','})
        self.create_dynamic_plugin('saw', (re1, ))
        return 'added'
    @botcmd
    def remove_saw(self, msg, args):
        """Tear the 'saw' dynamic plugin back down."""
        self.destroy_dynamic_plugin('saw')
        return 'removed'
    @botcmd
    def clash(self, msg, args):
        """Static command used to test name clashes with dynamic plugins."""
        return 'original'
    @botcmd
    def add_clashing(self, _, _1):
        """Register a dynamic command whose name collides with clash()."""
        simple1 = Command(lambda plugin, msg, args: 'dynamic', name='clash')
        self.create_dynamic_plugin('clashing', (simple1, ))
        return 'added'
    @botcmd
    def remove_clashing(self, _, _1):
        """Remove the clashing dynamic plugin, restoring the static command."""
        self.destroy_dynamic_plugin('clashing')
        return 'removed'
| mrshu/err | tests/dyna_plugin/dyna.py | Python | gpl-3.0 | 1,922 |
# Copyright (c) 2015 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mock unit tests for the NetApp driver protocols NFS class module.
"""
import copy
from unittest import mock
import uuid
import ddt
from manila import exception
from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode
from manila import test
from manila.tests.share.drivers.netapp.dataontap.protocols \
import fakes as fake
@ddt.ddt
class NetAppClusteredNFSHelperTestCase(test.TestCase):
    """Unit tests for the cDOT NFS protocol helper.

    The Data ONTAP client is fully mocked; the tests only verify which
    client calls the helper makes and what values it returns.
    """
    def setUp(self):
        super(NetAppClusteredNFSHelperTestCase, self).setUp()
        self.mock_context = mock.Mock()
        self.mock_client = mock.Mock()
        self.helper = nfs_cmode.NetAppCmodeNFSHelper()
        self.helper.set_client(self.mock_client)
    @ddt.data(('1.2.3.4', '1.2.3.4'), ('fc00::1', '[fc00::1]'))
    @ddt.unpack
    def test__escaped_address(self, raw, escaped):
        # IPv6 addresses must be bracketed in export locations.
        self.assertEqual(escaped, self.helper._escaped_address(raw))
    @ddt.data(True, False)
    def test_create_share(self, is_flexgroup):
        """create_share returns a callable mapping LIF address -> export path."""
        mock_ensure_export_policy = self.mock_object(self.helper,
                                                     '_ensure_export_policy')
        self.mock_client.get_volume_junction_path.return_value = (
            fake.NFS_SHARE_PATH)
        self.mock_client.get_volume.return_value = {
            'junction-path': fake.NFS_SHARE_PATH,
        }
        result = self.helper.create_share(fake.NFS_SHARE, fake.SHARE_NAME,
                                          is_flexgroup=is_flexgroup)
        export_addresses = [fake.SHARE_ADDRESS_1, fake.SHARE_ADDRESS_2]
        export_paths = [result(address) for address in export_addresses]
        expected_paths = [
            fake.SHARE_ADDRESS_1 + ":" + fake.NFS_SHARE_PATH,
            fake.SHARE_ADDRESS_2 + ":" + fake.NFS_SHARE_PATH,
        ]
        self.assertEqual(expected_paths, export_paths)
        (self.mock_client.clear_nfs_export_policy_for_volume.
            assert_called_once_with(fake.SHARE_NAME))
        self.assertTrue(mock_ensure_export_policy.called)
        # FlexGroup volumes resolve the junction path via get_volume instead.
        if is_flexgroup:
            self.assertTrue(self.mock_client.get_volume.called)
        else:
            self.assertTrue(self.mock_client.get_volume_junction_path.called)
    def test_delete_share(self):
        self.helper.delete_share(fake.NFS_SHARE, fake.SHARE_NAME)
        (self.mock_client.clear_nfs_export_policy_for_volume.
            assert_called_once_with(fake.SHARE_NAME))
        self.mock_client.soft_delete_nfs_export_policy.assert_called_once_with(
            fake.EXPORT_POLICY_NAME)
    def test_update_access(self):
        """Access rules are applied atomically via temp-policy rename dance."""
        self.mock_object(self.helper, '_ensure_export_policy')
        self.mock_object(self.helper,
                         '_get_export_policy_name',
                         mock.Mock(return_value='fake_export_policy'))
        self.mock_object(self.helper,
                         '_get_temp_export_policy_name',
                         mock.Mock(side_effect=['fake_new_export_policy',
                                                'fake_old_export_policy']))
        fake_auth_method = 'fake_auth_method'
        self.mock_object(self.helper,
                         '_get_auth_methods',
                         mock.Mock(return_value=fake_auth_method))
        self.helper.update_access(fake.CIFS_SHARE,
                                  fake.SHARE_NAME,
                                  [fake.IP_ACCESS])
        self.mock_client.create_nfs_export_policy.assert_called_once_with(
            'fake_new_export_policy')
        self.mock_client.add_nfs_export_rule.assert_called_once_with(
            'fake_new_export_policy', fake.CLIENT_ADDRESS_1, False,
            fake_auth_method)
        (self.mock_client.set_nfs_export_policy_for_volume.
            assert_called_once_with(fake.SHARE_NAME, 'fake_new_export_policy'))
        (self.mock_client.soft_delete_nfs_export_policy.
            assert_called_once_with('fake_old_export_policy'))
        self.mock_client.rename_nfs_export_policy.assert_has_calls([
            mock.call('fake_export_policy', 'fake_old_export_policy'),
            mock.call('fake_new_export_policy', 'fake_export_policy'),
        ])
    def test_validate_access_rule(self):
        result = self.helper._validate_access_rule(fake.IP_ACCESS)
        self.assertIsNone(result)
    def test_validate_access_rule_invalid_type(self):
        # Only IP-type rules are supported for NFS.
        rule = copy.copy(fake.IP_ACCESS)
        rule['access_type'] = 'user'
        self.assertRaises(exception.InvalidShareAccess,
                          self.helper._validate_access_rule,
                          rule)
    def test_validate_access_rule_invalid_level(self):
        rule = copy.copy(fake.IP_ACCESS)
        rule['access_level'] = 'none'
        self.assertRaises(exception.InvalidShareAccessLevel,
                          self.helper._validate_access_rule,
                          rule)
    def test_get_target(self):
        target = self.helper.get_target(fake.NFS_SHARE)
        self.assertEqual(fake.SHARE_ADDRESS_1, target)
    def test_get_share_name_for_share(self):
        self.mock_client.get_volume_at_junction_path.return_value = (
            fake.VOLUME)
        share_name = self.helper.get_share_name_for_share(fake.NFS_SHARE)
        self.assertEqual(fake.SHARE_NAME, share_name)
        self.mock_client.get_volume_at_junction_path.assert_called_once_with(
            fake.NFS_SHARE_PATH)
    def test_get_share_name_for_share_not_found(self):
        self.mock_client.get_volume_at_junction_path.return_value = None
        share_name = self.helper.get_share_name_for_share(fake.NFS_SHARE)
        self.assertIsNone(share_name)
        self.mock_client.get_volume_at_junction_path.assert_called_once_with(
            fake.NFS_SHARE_PATH)
    def test_get_target_missing_location(self):
        target = self.helper.get_target({'export_location': ''})
        self.assertEqual('', target)
    def test_get_export_location(self):
        export = fake.NFS_SHARE['export_location']
        self.mock_object(self.helper, '_get_share_export_location',
                         mock.Mock(return_value=export))
        host_ip, export_path = self.helper._get_export_location(
            fake.NFS_SHARE)
        self.assertEqual(fake.SHARE_ADDRESS_1, host_ip)
        self.assertEqual('/' + fake.SHARE_NAME, export_path)
    @ddt.data('', 'invalid')
    def test_get_export_location_missing_location_invalid(self, export):
        # An unparsable export location degrades to empty host and path.
        fake_share = fake.NFS_SHARE.copy()
        fake_share['export_location'] = export
        self.mock_object(self.helper, '_get_share_export_location',
                         mock.Mock(return_value=export))
        host_ip, export_path = self.helper._get_export_location(fake_share)
        self.assertEqual('', host_ip)
        self.assertEqual('', export_path)
        self.helper._get_share_export_location.assert_called_once_with(
            fake_share)
    def test_get_temp_export_policy_name(self):
        self.mock_object(uuid, 'uuid1', mock.Mock(return_value='fake-uuid'))
        result = self.helper._get_temp_export_policy_name()
        # Hyphens are replaced since policy names must be identifier-like.
        self.assertEqual('temp_fake_uuid', result)
    def test_get_export_policy_name(self):
        result = self.helper._get_export_policy_name(fake.NFS_SHARE)
        self.assertEqual(fake.EXPORT_POLICY_NAME, result)
    def test_ensure_export_policy_equal(self):
        # Policy already correct: nothing to create or rename.
        self.mock_client.get_nfs_export_policy_for_volume.return_value = (
            fake.EXPORT_POLICY_NAME)
        self.helper._ensure_export_policy(fake.NFS_SHARE, fake.SHARE_NAME)
        self.assertFalse(self.mock_client.create_nfs_export_policy.called)
        self.assertFalse(self.mock_client.rename_nfs_export_policy.called)
    def test_ensure_export_policy_default(self):
        # Volume still on the 'default' policy: a new policy is created.
        self.mock_client.get_nfs_export_policy_for_volume.return_value = (
            'default')
        self.helper._ensure_export_policy(fake.NFS_SHARE, fake.SHARE_NAME)
        self.mock_client.create_nfs_export_policy.assert_called_once_with(
            fake.EXPORT_POLICY_NAME)
        (self.mock_client.set_nfs_export_policy_for_volume.
            assert_called_once_with(fake.SHARE_NAME, fake.EXPORT_POLICY_NAME))
        self.assertFalse(self.mock_client.rename_nfs_export_policy.called)
    def test_ensure_export_policy_rename(self):
        # Wrong (non-default) policy name: it is renamed, not recreated.
        self.mock_client.get_nfs_export_policy_for_volume.return_value = 'fake'
        self.helper._ensure_export_policy(fake.NFS_SHARE, fake.SHARE_NAME)
        self.assertFalse(self.mock_client.create_nfs_export_policy.called)
        self.mock_client.rename_nfs_export_policy.assert_called_once_with(
            'fake', fake.EXPORT_POLICY_NAME)
    @ddt.data((False, ['sys']), (True, ['krb5', 'krb5i', 'krb5p']))
    @ddt.unpack
    def test__get_security_flavors(self, kerberos_enabled, security_flavors):
        self.mock_client.is_kerberos_enabled.return_value = kerberos_enabled
        result = self.helper._get_auth_methods()
        self.assertEqual(security_flavors, result)
    def test_cleanup_demoted_replica(self):
        self.mock_object(self.helper, 'delete_share')
        self.helper.cleanup_demoted_replica(fake.NFS_SHARE, fake.SHARE_NAME)
        self.helper.delete_share.assert_called_once_with(fake.NFS_SHARE,
                                                         fake.SHARE_NAME)
| openstack/manila | manila/tests/share/drivers/netapp/dataontap/protocols/test_nfs_cmode.py | Python | apache-2.0 | 9,943 |
from django import template
register = template.Library()
@register.filter(name = 'implode')
def implode(lst, sep = ' - '):
    """Join the items of lst into one string, separated by sep."""
    return str(sep).join(["%s" % (v) for v in lst])
"""
Tag que devuelve un atributo class con el texto pasado
"""
@register.tag(name = 'css_classes')
def css_classes(parser, token):
    """Template tag that renders a class attribute with the given text."""
    try:
        # Expect exactly one argument: {% css_classes something %}.
        tag_name, classes = token.split_contents()
    except ValueError:
        # NOTE(review): a bare Exception(1) loses the tag name; Django
        # convention is template.TemplateSyntaxError -- kept as-is so
        # existing callers' except clauses keep working.
        raise Exception(1)
    return CssClassesNode(classes)
class CssClassesNode(template.Node):
    """Node that emits ' class=<classes>', or nothing when classes is empty."""
    def __init__(self, classes):
        self.classes = classes
    def render(self, context):
        if len(self.classes) == 0:
            return ''
        # NOTE(review): the attribute value is emitted unquoted
        # (class=foo), which breaks for multiple space-separated class
        # names -- confirm the intended output before changing.
        return ' class=' + str(self.classes) + ''
def callMethod(obj, methodName):
    """Call obj.<methodName>(), consuming any arguments queued via args().

    Template usage: {{ obj|args:1|args:2|call:"method" }} calls
    obj.method(1, 2). The queued argument list is deleted once used, so a
    later plain |call invokes the method with no arguments again.
    """
    method = getattr(obj, methodName)
    # dict.has_key() only exists on Python 2; the `in` operator is
    # equivalent and works on both Python 2 and 3.
    if "__callArg" in obj.__dict__:
        ret = method(*obj.__callArg)
        del obj.__callArg
        return ret
    return method()
def args(obj, arg):
    """Queue arg on obj for a later callMethod()/|call invocation.

    Returns obj so the filter can be chained: {{ obj|args:1|args:2|... }}.
    """
    # dict.has_key() only exists on Python 2; `in` works on both 2 and 3.
    if "__callArg" not in obj.__dict__:
        obj.__callArg = []
    obj.__callArg += [arg]
    return obj
# Expose the helpers above as the "call" and "args" template filters.
register.filter("call", callMethod)
register.filter("args", args)
| MERegistro/meregistro | meregistro/custom_tags_filters/templatetags/tags_filters.py | Python | bsd-3-clause | 1,119 |
import unittest
from client import Client
class ClientTestCase(unittest.TestCase):
    """Integration test for Client; assumes a reachable Redis server."""
    def test_with_redis(self):
        # Round-trip a value through Redis; values come back as str/bytes,
        # hence the int() conversion before comparing.
        client = Client()
        client.set('tomato', 2)
        self.assertEqual(2, int(client.get('tomato')))
| leehosung/pycon-testing | integration_test/tests/test_client.py | Python | mit | 230 |
from test_lib.utils import get_data_by_path
class __DEFAULT__: # pylint: disable=invalid-name,too-few-public-methods
    """Sentinel type used to distinguish 'value absent' from a real None."""
    pass
class ClassBase:
"""
This class that is meant to be used as base for class that could be stored or loaded (in ES or any other backend)
"""
_es_data_mapping = {}
_data_type = None
def __init__(self, es_data=None, **kwargs):
if es_data:
self.load_from_es_data(es_data)
if kwargs:
self.load_kwargs(kwargs)
def load_kwargs(self, kwargs):
errors = []
for data_name, value in kwargs.items():
data_type = self.__annotations__.get(data_name, None) # pylint: disable=no-member
if data_type is None:
errors.append(f'Wrong {data_name} attribute was provided')
continue
if not isinstance(value, data_type):
errors.append(f'Wrong {data_name} attribute value was provided')
continue
setattr(self, data_name, value)
if errors:
raise ValueError(
f"Following errors occurred during class {self.__class__.__name__} initialization: \n" +
"\n".join(errors)
)
def load_from_es_data(self, es_data):
"""
Fills instance data with data from ES
"""
if not isinstance(es_data, dict):
raise ValueError(f"Class {self.__class__.__name__} can be loaded only from dict")
data_mapping = self._es_data_mapping
for data_name, data_type in self.__annotations__.items(): # pylint: disable=no-member
data_path = data_mapping.get(data_name, __DEFAULT__)
if data_path is __DEFAULT__:
value = es_data.get(data_name, __DEFAULT__)
elif data_path == '':
value = es_data
else:
value = get_data_by_path(es_data, data_path=data_path, default=__DEFAULT__)
if value is __DEFAULT__:
continue
self._apply_data(data_name, data_type, value)
def save_to_es_data(self):
"""
Represents contents of the instance as ES data according to _es_data_mappings
"""
output = {}
def data_cb(data_instance, current_instance, data_path, es_data_path, is_edge):
if is_edge:
if isinstance(data_instance, ClassBase):
value = data_instance.save_to_es_data()
else:
value = data_instance
output['.'.join(es_data_path)] = value
self._iterate_data(data_cb)
return output
def _apply_data(self, data_name, data_type, value):
setattr(self, data_name, data_type(value))
def is_valid(self):
for data_name in self.__annotations__.keys(): # pylint: disable=no-member
default = getattr(self.__class__, data_name)
value = getattr(self, data_name, None)
if value is default:
return False
elif isinstance(value, ClassBase):
if not value.is_valid():
return False
return True
@classmethod
def _get_all_es_data_mapping(cls, max_level=10) -> dict:
"""
Returns dictionary where keys are all possible class data paths and values are related ES data paths
"""
if max_level == 0:
return {}
output = {}
for data_name, data_type in cls.__annotations__.items(): # pylint: disable=no-member
data_path = cls._es_data_mapping.get(data_name, __DEFAULT__)
if data_path is __DEFAULT__:
data_path = data_name
# Set data
if isinstance(data_type, type) and issubclass(data_type, ClassBase):
if data_type.load_from_es_data is not cls.load_from_es_data:
# No mapping if custom loader defined
child_data_mapping = {}
else:
child_data_mapping = data_type._get_all_es_data_mapping( # pylint: disable=protected-access
max_level=max_level-1)
if not child_data_mapping:
output[data_name] = data_path
continue
for child_data_name, child_data_path in child_data_mapping.items():
if data_path:
output[f'{data_name}.{child_data_name}'] = f'{data_path}.{child_data_path}'
else:
output[f'{data_name}.{child_data_name}'] = child_data_path
else:
output[data_name] = data_path
return output
def _iterate_data(self, callback, data_path=None, es_data_path=None):
"""
Iterate all data in the instance by calling callback function
"""
if data_path is None:
data_path = []
if es_data_path is None:
es_data_path = []
instances = [(self, data_path, es_data_path)]
while instances:
current_instance, data_path, es_data_path = instances.pop()
for data_name, data_instance in current_instance.__dict__.items():
if current_instance.__annotations__.get(data_name, None) is None: # pylint: disable=no-member
continue
es_data_name = current_instance._es_data_mapping.get( # pylint: disable=protected-access
data_name, None)
if es_data_name is None:
es_data_name = es_data_path + [data_name]
elif es_data_name == '':
es_data_name = es_data_path
else:
es_data_name = es_data_path + es_data_name.split('.')
if not isinstance(data_instance, ClassBase) \
or data_instance.__class__.load_from_es_data is not ClassBase.load_from_es_data:
callback(data_instance, current_instance, data_path + [data_name], es_data_name, True)
continue
if callback(data_instance, current_instance, data_path + [data_name], es_data_name, False):
instances.insert(0, (data_instance, data_path + [data_name], es_data_name))
    def _get_es_data_path_and_values_from_patterns(self, data_patterns: list, flatten: bool = False) -> dict:
        """
        Reads data patterns and builds dictionary of es data paths as keys and instance values as values
        If flatten is True, it will produce one level dictionary,
        otherwise each level of data path will be represented by one level in dictionary

        Each pattern is a dot-separated attribute path; a ``'*'`` segment
        matches any remaining path, but only edge (leaf-like) values are
        collected there.  ``ClassBase`` values are serialized via
        ``save_to_es_data()`` before being stored in the output.
        """
        data_patterns_split = []
        for data_pattern in data_patterns:
            data_patterns_split.append(data_pattern.split('.'))
        output = {}
        def data_cb(data_instance, current_instance, data_path, es_data_path, is_edge):  # pylint: disable=too-many-branches, too-many-locals
            # Return value tells _iterate_data whether to descend further.
            final_return = False
            for data_pattern_split in data_patterns_split:
                # Exact-length match is required unless a '*' is hit first.
                to_add = len(data_pattern_split) == len(data_path)
                to_return = False
                for num, data_pattern_part in enumerate(data_pattern_split):
                    if num >= len(data_path):
                        # Pattern is longer than the current path: keep descending.
                        to_add = False
                        final_return = True
                        break
                    data_path_part = data_path[num]
                    if data_pattern_part == '*':
                        # Wildcard: collect only edge values, but keep descending.
                        to_add = is_edge
                        to_return = True
                        break
                    if data_pattern_part != data_path_part:
                        to_add = False
                        break
                if to_add:
                    if isinstance(data_instance, ClassBase):
                        result = data_instance.save_to_es_data()
                    else:
                        result = data_instance
                    current_output = output
                    if flatten:
                        current_output['.'.join(es_data_path)] = result
                    else:
                        # Build (or reuse) one nested dict level per ES path segment.
                        for es_data_path_part in es_data_path[:-1]:
                            new_current_output = current_output.get(es_data_path_part, __DEFAULT__)
                            if new_current_output is __DEFAULT__:
                                current_output[es_data_path_part] = {}
                                current_output = current_output[es_data_path_part]
                            else:
                                current_output = new_current_output
                        current_output[es_data_path[-1]] = result
                if to_return:
                    return True
            return final_return
        self._iterate_data(data_cb)
        return output
| scylladb/scylla-cluster-tests | sdcm/results_analyze/base.py | Python | agpl-3.0 | 8,871 |
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.convert.lrf_output_ui import Ui_Form
from calibre.gui2.convert import Widget
font_family_model = None
class PluginWidget(Widget, Ui_Form):
    """Conversion-options widget for calibre's LRF output format.

    Registers the LRF-specific conversion options with the generic
    :class:`Widget` machinery and wires up the Qt form from ``Ui_Form``.
    """

    TITLE = _('LRF Output')
    HELP = _('Options specific to')+' LRF '+_('output')
    COMMIT_NAME = 'lrf_output'
    ICON = I('mimetypes/lrf.png')

    def __init__(self, parent, get_option, get_help, db=None, book_id=None):
        """Initialize the widget.

        :param parent: parent Qt widget.
        :param get_option: callable used to look up option values.
        :param get_help: callable used to look up option help strings.
        :param db: optional calibre database handle.
        :param book_id: optional id of the book being converted.
        """
        # BUGFIX: the original option list contained 'minimum_indent' twice;
        # each option only needs to be registered once.
        Widget.__init__(self, parent,
                ['wordspace', 'header', 'header_format',
                    'minimum_indent', 'serif_family',
                    'render_tables_as_images', 'sans_family', 'mono_family',
                    'text_size_multiplier_for_rendered_tables', 'autorotation',
                    'header_separation']
                )
        self.db, self.book_id = db, book_id
        self.initialize_options(get_option, get_help, db, book_id)
        # Toggle each checkbox twice: state is unchanged, but the toggled
        # signals fire so dependent controls refresh their enabled state.
        self.opt_header.toggle(), self.opt_header.toggle()
        self.opt_render_tables_as_images.toggle()
        self.opt_render_tables_as_images.toggle()
| ashang/calibre | src/calibre/gui2/convert/lrf_output.py | Python | gpl-3.0 | 1,264 |
from gettext import gettext as _
from typing import Optional, Callable, List, Set
from blueman.main.DBusProxies import AppletService
from blueman.Service import Service, Action, Instance
from blueman.bluez.Device import Device
from blueman.bluez.Network import Network
from blueman.bluez.errors import BluezDBusException
class NetworkService(Service):
    """Bluetooth network (PAN) service exposed for a paired device."""

    def __init__(self, device: Device, uuid: str):
        super().__init__(device, uuid)
        self._service = Network(obj_path=device.get_object_path())

    @property
    def available(self) -> bool:
        """The Network interface only appears once the device is paired."""
        is_paired: bool = self.device["Paired"]
        return is_paired

    @property
    def connectable(self) -> bool:
        """Whether a connect attempt currently makes sense."""
        if not self.available:
            return True
        return not self._service["Connected"]

    @property
    def connected_instances(self) -> List[Instance]:
        """One active instance while connected, an empty list otherwise."""
        if self.connectable:
            return []
        return [Instance(self.name)]

    def connect(
        self,
        reply_handler: Optional[Callable[[str], None]] = None,
        error_handler: Optional[Callable[[BluezDBusException], None]] = None,
    ) -> None:
        """Ask BlueZ to connect this service's UUID."""
        self._service.connect(self.uuid, reply_handler=reply_handler, error_handler=error_handler)

    def disconnect(
        self,
        reply_handler: Optional[Callable[[], None]] = None,
        error_handler: Optional[Callable[[BluezDBusException], None]] = None,
    ) -> None:
        """Ask BlueZ to tear the network connection down."""
        self._service.disconnect(reply_handler=reply_handler, error_handler=error_handler)

    @property
    def common_actions(self) -> Set[Action]:
        """Extra UI actions offered for this service."""
        def request_dhcp_renewal() -> None:
            AppletService().DhcpClient('(s)', self.device.get_object_path())

        renew_action = Action(
            _("Renew IP Address"),
            "view-refresh",
            {"DhcpClient"},
            request_dhcp_renewal
        )
        return {renew_action}
| blueman-project/blueman | blueman/services/meta/NetworkService.py | Python | gpl-3.0 | 1,813 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import os
from core.domain import collection_services
from core.domain import event_services
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import user_jobs_continuous_test
from core.domain import user_services
from core.tests import test_utils
import feconf
import utils
from google.appengine.api import urlfetch
class UserServicesUnitTests(test_utils.GenericTestBase):
    """Test the user services methods."""
    def test_set_and_get_username(self):
        # Setting a username for a user that does not exist yet must fail.
        user_id = 'someUser'
        username = 'username'
        with self.assertRaisesRegexp(Exception, 'User not found.'):
            user_services.set_username(user_id, username)
        user_services.get_or_create_user(user_id, 'user@example.com')
        user_services.set_username(user_id, username)
        self.assertEquals(username, user_services.get_username(user_id))
    def test_get_username_for_nonexistent_user(self):
        with self.assertRaisesRegexp(Exception, 'User not found.'):
            user_services.get_username('fakeUser')
    def test_get_username_none(self):
        # A freshly-created user has no username until one is explicitly set.
        user_services.get_or_create_user('fakeUser', 'user@example.com')
        self.assertEquals(None, user_services.get_username('fakeUser'))
    def test_is_username_taken_false(self):
        self.assertFalse(user_services.is_username_taken('fakeUsername'))
    def test_is_username_taken_true(self):
        user_id = 'someUser'
        username = 'newUsername'
        user_services.get_or_create_user(user_id, 'user@example.com')
        user_services.set_username(user_id, username)
        self.assertTrue(user_services.is_username_taken(username))
    def test_is_username_taken_different_case(self):
        # Username uniqueness is case-insensitive.
        user_id = 'someUser'
        username = 'camelCase'
        user_services.get_or_create_user(user_id, 'user@example.com')
        user_services.set_username(user_id, username)
        self.assertTrue(user_services.is_username_taken('CaMeLcAsE'))
    def test_set_invalid_usernames(self):
        user_id = 'someUser'
        user_services.get_or_create_user(user_id, 'user@example.com')
        # Whitespace, symbols, empty, over-long and reserved admin-like names.
        bad_usernames = [
            ' bob ', '@', '', 'a' * 100, 'ADMIN', 'admin', 'AdMiN2020']
        for username in bad_usernames:
            with self.assertRaises(utils.ValidationError):
                user_services.set_username(user_id, username)
    def test_invalid_emails(self):
        bad_email_addresses = ['@', '@@', 'abc', '', None, ['a', '@', 'b.com']]
        for email in bad_email_addresses:
            with self.assertRaises(utils.ValidationError):
                user_services.get_or_create_user('user_id', email)
    def test_email_truncation(self):
        # Pairs of (actual email, expected truncated display form).
        email_addresses = [
            ('a@b.c', '..@b.c'),
            ('ab@c.d', 'a..@c.d'),
            ('abc@def.gh', 'a..@def.gh'),
            ('abcd@efg.h', 'a..@efg.h'),
            ('abcdefgh@efg.h', 'abcde..@efg.h'),
        ]
        for ind, (actual_email, expected_email) in enumerate(email_addresses):
            user_settings = user_services.get_or_create_user(
                str(ind), actual_email)
            self.assertEqual(user_settings.truncated_email, expected_email)
    def test_get_email_from_username(self):
        user_id = 'someUser'
        username = 'username'
        user_email = 'user@example.com'
        user_services.get_or_create_user(user_id, user_email)
        user_services.set_username(user_id, username)
        self.assertEquals(user_services.get_username(user_id), username)
        # Handle usernames that exist.
        self.assertEquals(
            user_services.get_email_from_username(username), user_email)
        # Handle usernames in the same equivalence class correctly.
        self.assertEquals(
            user_services.get_email_from_username('USERNAME'), user_email)
        # Return None for usernames which don't exist.
        self.assertIsNone(
            user_services.get_email_from_username('fakeUsername'))
    def test_get_user_id_from_username(self):
        user_id = 'someUser'
        username = 'username'
        user_email = 'user@example.com'
        user_services.get_or_create_user(user_id, user_email)
        user_services.set_username(user_id, username)
        self.assertEquals(user_services.get_username(user_id), username)
        # Handle usernames that exist.
        self.assertEquals(
            user_services.get_user_id_from_username(username), user_id)
        # Handle usernames in the same equivalence class correctly.
        self.assertEquals(
            user_services.get_user_id_from_username('USERNAME'), user_id)
        # Return None for usernames which don't exist.
        self.assertIsNone(
            user_services.get_user_id_from_username('fakeUsername'))
    def test_fetch_gravatar_success(self):
        user_email = 'user@example.com'
        expected_gravatar_filepath = os.path.join(
            'static', 'images', 'avatar', 'gravatar_example.png')
        with open(expected_gravatar_filepath, 'r') as f:
            gravatar = f.read()
        # Mock the HTTP fetch so the test never hits the network.
        with self.urlfetch_mock(content=gravatar):
            profile_picture = user_services.fetch_gravatar(user_email)
            gravatar_data_url = utils.convert_png_to_data_url(
                expected_gravatar_filepath)
            self.assertEqual(profile_picture, gravatar_data_url)
    def test_fetch_gravatar_failure_404(self):
        user_email = 'user@example.com'
        error_messages = []
        def log_mock(message):
            error_messages.append(message)
        gravatar_url = user_services.get_gravatar_url(user_email)
        expected_error_message = (
            '[Status 404] Failed to fetch Gravatar from %s' % gravatar_url)
        logging_error_mock = test_utils.CallCounter(log_mock)
        urlfetch_counter = test_utils.CallCounter(urlfetch.fetch)
        urlfetch_mock_ctx = self.urlfetch_mock(status_code=404)
        log_swap_ctx = self.swap(logging, 'error', logging_error_mock)
        fetch_swap_ctx = self.swap(urlfetch, 'fetch', urlfetch_counter)
        with urlfetch_mock_ctx, log_swap_ctx, fetch_swap_ctx:
            profile_picture = user_services.fetch_gravatar(user_email)
            self.assertEqual(urlfetch_counter.times_called, 1)
            self.assertEqual(logging_error_mock.times_called, 1)
            self.assertEqual(expected_error_message, error_messages[0])
            # On failure the default identicon is returned instead of raising.
            self.assertEqual(
                profile_picture, user_services.DEFAULT_IDENTICON_DATA_URL)
    def test_fetch_gravatar_failure_exception(self):
        user_email = 'user@example.com'
        error_messages = []
        def log_mock(message):
            error_messages.append(message)
        gravatar_url = user_services.get_gravatar_url(user_email)
        expected_error_message = (
            'Failed to fetch Gravatar from %s' % gravatar_url)
        logging_error_mock = test_utils.CallCounter(log_mock)
        # Every fetch attempt raises InvalidURLError.
        urlfetch_fail_mock = test_utils.FailingFunction(
            urlfetch.fetch, urlfetch.InvalidURLError,
            test_utils.FailingFunction.INFINITY)
        log_swap_ctx = self.swap(logging, 'error', logging_error_mock)
        fetch_swap_ctx = self.swap(urlfetch, 'fetch', urlfetch_fail_mock)
        with log_swap_ctx, fetch_swap_ctx:
            profile_picture = user_services.fetch_gravatar(user_email)
            self.assertEqual(logging_error_mock.times_called, 1)
            self.assertEqual(expected_error_message, error_messages[0])
            self.assertEqual(
                profile_picture, user_services.DEFAULT_IDENTICON_DATA_URL)
    def test_default_identicon_data_url(self):
        identicon_filepath = os.path.join(
            'static', 'images', 'avatar', 'user_blue_72px.png')
        identicon_data_url = utils.convert_png_to_data_url(
            identicon_filepath)
        self.assertEqual(
            identicon_data_url, user_services.DEFAULT_IDENTICON_DATA_URL)
    def test_set_and_get_user_email_preferences(self):
        user_id = 'someUser'
        username = 'username'
        user_email = 'user@example.com'
        user_services.get_or_create_user(user_id, user_email)
        user_services.set_username(user_id, username)
        # When UserEmailPreferencesModel is yet to be created,
        # the value returned by get_email_preferences() should be True.
        email_preferences = user_services.get_email_preferences(user_id)
        self.assertEquals(
            email_preferences['can_receive_editor_role_email'],
            feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
        email_preferences = user_services.get_email_preferences(user_id)
        self.assertEquals(
            email_preferences['can_receive_feedback_message_email'],
            feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
        # The user retrieves their email preferences. This initializes
        # a UserEmailPreferencesModel instance with the default values.
        user_services.update_email_preferences(
            user_id, feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE,
            feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE,
            feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
        email_preferences = user_services.get_email_preferences(user_id)
        self.assertEquals(
            email_preferences['can_receive_editor_role_email'],
            feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
        self.assertEquals(
            email_preferences['can_receive_feedback_message_email'],
            feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
        # The user sets their membership email preference to False.
        user_services.update_email_preferences(
            user_id, feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE, False,
            False)
        email_preferences = user_services.get_email_preferences(user_id)
        self.assertEquals(
            email_preferences['can_receive_editor_role_email'], False)
        self.assertEquals(
            email_preferences['can_receive_feedback_message_email'], False)
    def test_get_current_date_as_string(self):
        custom_datetimes = [
            datetime.date(2011, 1, 1),
            datetime.date(2012, 2, 28)
        ]
        datetime_strings = [custom_datetime.strftime(
            feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT)
                            for custom_datetime in custom_datetimes]
        # Expected format is zero-padded YYYY-MM-DD.
        self.assertEqual(len(datetime_strings[0].split('-')[0]), 4)
        self.assertEqual(len(datetime_strings[0].split('-')[1]), 2)
        self.assertEqual(len(datetime_strings[0].split('-')[2]), 2)
        self.assertEqual(len(datetime_strings[1].split('-')[0]), 4)
        self.assertEqual(len(datetime_strings[1].split('-')[1]), 2)
        self.assertEqual(len(datetime_strings[1].split('-')[2]), 2)
        self.assertEqual(datetime_strings[0], '2011-01-01')
        self.assertEqual(datetime_strings[1], '2012-02-28')
    def test_parse_date_from_string(self):
        # Last two entries have an invalid month/day and must raise.
        test_datetime_strings = [
            '2016-06-30',
            '2016-07-05',
            '2016-13-01',
            '2016-03-32'
        ]
        self.assertEqual(
            user_services.parse_date_from_string(test_datetime_strings[0]),
            {
                'year': 2016,
                'month': 6,
                'day': 30
            })
        self.assertEqual(
            user_services.parse_date_from_string(test_datetime_strings[1]),
            {
                'year': 2016,
                'month': 7,
                'day': 5
            })
        with self.assertRaises(ValueError):
            user_services.parse_date_from_string(test_datetime_strings[2])
        with self.assertRaises(ValueError):
            user_services.parse_date_from_string(test_datetime_strings[3])
class UpdateContributionMsecTests(test_utils.GenericTestBase):
    """Test whether contribution date changes with publication of
    exploration/collection and update of already published
    exploration/collection.
    """
    EXP_ID = 'test_exp'
    COL_ID = 'test_col'
    COLLECTION_TITLE = 'title'
    COLLECTION_CATEGORY = 'category'
    COLLECTION_OBJECTIVE = 'objective'
    def setUp(self):
        # Create an admin, an editor and an owner used by all tests below.
        super(UpdateContributionMsecTests, self).setUp()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_USERNAME])
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
    def test_contribution_msec_updates_on_published_explorations(self):
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.admin_id, end_state_name='End')
        init_state_name = exploration.init_state_name
        exp_services.publish_exploration_and_update_user_profiles(
            self.admin_id, self.EXP_ID)
        # Test all owners and editors of exploration after publication have
        # updated first contribution times in msecs.
        self.assertIsNotNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        # Test editor of published exploration has updated contribution time.
        rights_manager.release_ownership_of_exploration(
            self.admin_id, self.EXP_ID)
        exp_services.update_exploration(
            self.editor_id, self.EXP_ID, [{
                'cmd': 'edit_state_property',
                'state_name': init_state_name,
                'property_name': 'widget_id',
                'new_value': 'MultipleChoiceInput'
            }], 'commit')
        self.assertIsNotNone(user_services.get_user_settings(
            self.editor_id).first_contribution_msec)
    def test_contribution_msec_does_not_update_until_exp_is_published(self):
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.admin_id, end_state_name='End')
        init_state_name = exploration.init_state_name
        # Test that saving an exploration does not update first contribution
        # time.
        self.assertIsNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        # Test that commit to unpublished exploration does not update
        # contribution time.
        exp_services.update_exploration(
            self.admin_id, self.EXP_ID, [{
                'cmd': 'edit_state_property',
                'state_name': init_state_name,
                'property_name': 'widget_id',
                'new_value': 'MultipleChoiceInput'
            }], '')
        self.assertIsNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        # Test that another user who commits to unpublished exploration does not
        # have updated first contribution time.
        rights_manager.assign_role_for_exploration(
            self.admin_id, self.EXP_ID, self.editor_id, 'editor')
        exp_services.update_exploration(
            self.editor_id, self.EXP_ID, [{
                'cmd': 'rename_state',
                'old_state_name': feconf.DEFAULT_INIT_STATE_NAME,
                'new_state_name': u'¡Hola! αβγ',
            }], '')
        self.assertIsNone(user_services.get_user_settings(
            self.editor_id).first_contribution_msec)
        # Test that after an exploration is published, all contributors have
        # updated first contribution time.
        exp_services.publish_exploration_and_update_user_profiles(
            self.admin_id, self.EXP_ID)
        self.assertIsNotNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        self.assertIsNotNone(user_services.get_user_settings(
            self.editor_id).first_contribution_msec)
    def test_contribution_msec_does_not_change_if_no_contribution_to_exp(self):
        self.save_new_valid_exploration(
            self.EXP_ID, self.admin_id, end_state_name='End')
        rights_manager.assign_role_for_exploration(
            self.admin_id, self.EXP_ID, self.editor_id, 'editor')
        exp_services.publish_exploration_and_update_user_profiles(
            self.admin_id, self.EXP_ID)
        # Test that contribution time is not given to an editor that has not
        # contributed.
        self.assertIsNotNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        self.assertIsNone(user_services.get_user_settings(
            self.editor_id).first_contribution_msec)
    def test_contribution_msec_does_not_change_if_exp_unpublished(self):
        self.save_new_valid_exploration(
            self.EXP_ID, self.owner_id, end_state_name='End')
        exp_services.publish_exploration_and_update_user_profiles(
            self.owner_id, self.EXP_ID)
        rights_manager.unpublish_exploration(self.admin_id, self.EXP_ID)
        # Test that contribution time is not eliminated if exploration is
        # unpublished.
        self.assertIsNotNone(user_services.get_user_settings(
            self.owner_id).first_contribution_msec)
    def test_contribution_msec_updates_on_published_collections(self):
        self.save_new_valid_collection(
            self.COL_ID, self.admin_id, title=self.COLLECTION_TITLE,
            category=self.COLLECTION_CATEGORY,
            objective=self.COLLECTION_OBJECTIVE,
            exploration_id=self.EXP_ID)
        collection_services.publish_collection_and_update_user_profiles(
            self.admin_id, self.COL_ID)
        exp_services.publish_exploration_and_update_user_profiles(
            self.admin_id, self.EXP_ID)
        # Test all owners and editors of collection after publication have
        # updated first contribution times.
        self.assertIsNotNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        # Test editor of published collection has updated
        # first contribution time.
        rights_manager.release_ownership_of_collection(
            self.admin_id, self.COL_ID)
        collection_services.update_collection(
            self.editor_id, self.COL_ID, [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'Some new title'
            }], 'Changed the title')
        self.assertIsNotNone(user_services.get_user_settings(
            self.editor_id).first_contribution_msec)
    def test_contribution_msec_does_not_update_until_collection_is_published(
            self):
        self.save_new_valid_collection(
            self.COL_ID, self.admin_id, title=self.COLLECTION_TITLE,
            category=self.COLLECTION_CATEGORY,
            objective=self.COLLECTION_OBJECTIVE,
            exploration_id=self.EXP_ID)
        # Test that saving a collection does not update first contribution
        # time.
        self.assertIsNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        # Test that commit to unpublished collection does not update
        # contribution time.
        collection_services.update_collection(
            self.admin_id, self.COL_ID, [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'Some new title'
            }], '')
        self.assertIsNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        # Test that another user who commits to unpublished collection does not
        # have updated first contribution time.
        rights_manager.assign_role_for_collection(
            self.admin_id, self.COL_ID, self.editor_id, 'editor')
        collection_services.update_collection(
            self.editor_id, self.COL_ID, [{
                'cmd': 'edit_collection_property',
                'property_name': 'category',
                'new_value': 'Some new category'
            }], '')
        self.assertIsNone(user_services.get_user_settings(
            self.editor_id).first_contribution_msec)
        # Test that after an collection is published, all contributors have
        # updated first contribution times.
        collection_services.publish_collection_and_update_user_profiles(
            self.admin_id, self.COL_ID)
        self.assertIsNotNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        self.assertIsNotNone(user_services.get_user_settings(
            self.editor_id).first_contribution_msec)
    def test_contribution_msec_does_not_change_if_no_contribution_to_collection(
            self):
        self.save_new_valid_collection(
            self.COL_ID, self.admin_id, title=self.COLLECTION_TITLE,
            category=self.COLLECTION_CATEGORY,
            objective=self.COLLECTION_OBJECTIVE,
            exploration_id=self.EXP_ID)
        rights_manager.assign_role_for_collection(
            self.admin_id, self.COL_ID, self.editor_id, 'editor')
        collection_services.publish_collection_and_update_user_profiles(
            self.admin_id, self.COL_ID)
        # Test that contribution time is not given to an editor that has not
        # contributed.
        self.assertIsNotNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        self.assertIsNone(user_services.get_user_settings(
            self.editor_id).first_contribution_msec)
    def test_contribution_msec_does_not_change_if_collection_unpublished(self):
        self.save_new_valid_collection(
            self.COL_ID, self.owner_id, title=self.COLLECTION_TITLE,
            category=self.COLLECTION_CATEGORY,
            objective=self.COLLECTION_OBJECTIVE,
            exploration_id=self.EXP_ID)
        collection_services.publish_collection_and_update_user_profiles(
            self.owner_id, self.COL_ID)
        rights_manager.unpublish_collection(self.admin_id, self.COL_ID)
        # Test that first contribution msec is not eliminated if collection is
        # unpublished.
        self.assertIsNotNone(user_services.get_user_settings(
            self.owner_id).first_contribution_msec)
class UserDashboardStatsTests(test_utils.GenericTestBase):
    """Test whether exploration-related statistics of a user change as events
    are registered.
    """
    OWNER_EMAIL = 'owner@example.com'
    OWNER_USERNAME = 'owner'
    EXP_ID = 'exp1'
    USER_SESSION_ID = 'session1'
    # Evaluated once at class-definition time; _mock_get_current_date_as_string
    # below returns this frozen value so date-keyed assertions stay stable.
    CURRENT_DATE_AS_STRING = user_services.get_current_date_as_string()
    def setUp(self):
        super(UserDashboardStatsTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
    def _mock_get_current_date_as_string(self):
        # Deterministic stand-in for user_services.get_current_date_as_string.
        return self.CURRENT_DATE_AS_STRING
    def test_get_user_dashboard_stats(self):
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.owner_id, end_state_name='End')
        init_state_name = exploration.init_state_name
        event_services.StartExplorationEventHandler.record(
            self.EXP_ID, 1, init_state_name, self.USER_SESSION_ID, {},
            feconf.PLAY_TYPE_NORMAL)
        # Stats are zero until the aggregation job runs.
        self.assertEquals(
            user_services.get_user_dashboard_stats(self.owner_id), {
                'total_plays': 0,
                'average_ratings': None
            })
        (user_jobs_continuous_test.ModifiedUserStatsAggregator
         .start_computation())
        self.process_and_flush_pending_tasks()
        self.assertEquals(
            user_services.get_user_dashboard_stats(self.owner_id), {
                'total_plays': 1,
                'average_ratings': None
            })
    def test_get_weekly_dashboard_stats_when_stats_model_is_none(self):
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.owner_id, end_state_name='End')
        init_state_name = exploration.init_state_name
        event_services.StartExplorationEventHandler.record(
            self.EXP_ID, 1, init_state_name, self.USER_SESSION_ID, {},
            feconf.PLAY_TYPE_NORMAL)
        self.assertEquals(
            user_services.get_weekly_dashboard_stats(self.owner_id), None)
        with self.swap(user_services,
                       'get_current_date_as_string',
                       self._mock_get_current_date_as_string):
            user_services.update_dashboard_stats_log(self.owner_id)
        self.assertEquals(
            user_services.get_weekly_dashboard_stats(self.owner_id), [{
                self.CURRENT_DATE_AS_STRING: {
                    'total_plays': 0,
                    'average_ratings': None
                }
            }])
    def test_get_weekly_dashboard_stats(self):
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.owner_id, end_state_name='End')
        init_state_name = exploration.init_state_name
        event_services.StartExplorationEventHandler.record(
            self.EXP_ID, 1, init_state_name, self.USER_SESSION_ID, {},
            feconf.PLAY_TYPE_NORMAL)
        self.assertEquals(
            user_services.get_weekly_dashboard_stats(self.owner_id), None)
        (user_jobs_continuous_test.ModifiedUserStatsAggregator
         .start_computation())
        self.process_and_flush_pending_tasks()
        # Weekly stats stay None until the dashboard stats log is updated.
        self.assertEquals(
            user_services.get_weekly_dashboard_stats(self.owner_id), None)
        with self.swap(user_services,
                       'get_current_date_as_string',
                       self._mock_get_current_date_as_string):
            user_services.update_dashboard_stats_log(self.owner_id)
        self.assertEquals(
            user_services.get_weekly_dashboard_stats(self.owner_id), [{
                self.CURRENT_DATE_AS_STRING: {
                    'total_plays': 1,
                    'average_ratings': None
                }
            }])
class SubjectInterestsUnitTests(test_utils.GenericTestBase):
    """Test the update_subject_interests method."""
    def setUp(self):
        super(SubjectInterestsUnitTests, self).setUp()
        self.user_id = 'someUser'
        self.username = 'username'
        self.user_email = 'user@example.com'
        user_services.get_or_create_user(self.user_id, self.user_email)
        user_services.set_username(self.user_id, self.username)
    def test_invalid_subject_interests_are_not_accepted(self):
        # Must be a list of non-empty, distinct, lowercase-alpha/space strings.
        with self.assertRaisesRegexp(utils.ValidationError, 'to be a list'):
            user_services.update_subject_interests(self.user_id, 'not a list')
        with self.assertRaisesRegexp(utils.ValidationError, 'to be a string'):
            user_services.update_subject_interests(self.user_id, [1, 2, 3])
        with self.assertRaisesRegexp(utils.ValidationError, 'to be non-empty'):
            user_services.update_subject_interests(self.user_id, ['', 'ab'])
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'to consist only of lowercase alphabetic characters and spaces'
            ):
            user_services.update_subject_interests(self.user_id, ['!'])
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'to consist only of lowercase alphabetic characters and spaces'
            ):
            user_services.update_subject_interests(
                self.user_id, ['has-hyphens'])
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'to consist only of lowercase alphabetic characters and spaces'
            ):
            user_services.update_subject_interests(
                self.user_id, ['HasCapitalLetters'])
        with self.assertRaisesRegexp(utils.ValidationError, 'to be distinct'):
            user_services.update_subject_interests(self.user_id, ['a', 'a'])
        # The following cases are all valid.
        user_services.update_subject_interests(self.user_id, [])
        user_services.update_subject_interests(
            self.user_id, ['singleword', 'has spaces'])
| zgchizi/oppia-uc | core/domain/user_services_test.py | Python | apache-2.0 | 28,809 |
# inter-node communication
from collections import defaultdict
from enum import IntEnum, unique
from plenum.common.plenum_protocol_version import PlenumProtocolVersion
from plenum.common.roles import Roles
from plenum.common.transactions import PlenumTransactions
# Wire-protocol message type tags. These values travel over the network
# between nodes — do not rename or change them.
NOMINATE = "NOMINATE"
REELECTION = "REELECTION"
PRIMARY = "PRIMARY"
PRIMDEC = "PRIMARYDECIDED"
BATCH = "BATCH"
REQACK = "REQACK"
REQNACK = "REQNACK"
REJECT = "REJECT"
POOL_LEDGER_TXNS = "POOL_LEDGER_TXNS"
PROPAGATE = "PROPAGATE"
PREPREPARE = "PREPREPARE"
OLD_VIEW_PREPREPARE_REQ = "OLD_VIEW_PREPREPARE_REQ"
OLD_VIEW_PREPREPARE_REP = "OLD_VIEW_PREPREPARE_REP"
PREPARE = "PREPARE"
COMMIT = "COMMIT"
CHECKPOINT = "CHECKPOINT"
CHECKPOINT_STATE = "CHECKPOINT_STATE"
THREE_PC_STATE = "THREE_PC_STATE"
UPDATE_BLS_MULTI_SIG = "UPDATE_BLS_MULTI_SIG"
REPLY = "REPLY"
ORDERED = "ORDERED"
REQKEY = "REQKEY"
INSTANCE_CHANGE = "INSTANCE_CHANGE"
BACKUP_INSTANCE_FAULTY = "BACKUP_INSTANCE_FAULTY"
VIEW_CHANGE_DONE = "VIEW_CHANGE_DONE"
CURRENT_STATE = "CURRENT_STATE"
VIEW_CHANGE = "VIEW_CHANGE"
VIEW_CHANGE_ACK = "VIEW_CHANGE_ACK"
NEW_VIEW = "NEW_VIEW"
LEDGER_STATUS = "LEDGER_STATUS"
CONSISTENCY_PROOF = "CONSISTENCY_PROOF"
CATCHUP_REQ = "CATCHUP_REQ"
CATCHUP_REP = "CATCHUP_REP"
MESSAGE_REQUEST = 'MESSAGE_REQUEST'
MESSAGE_RESPONSE = 'MESSAGE_RESPONSE'
OBSERVED_DATA = 'OBSERVED_DATA'
BATCH_COMMITTED = 'BATCH_COMMITTED'
VIEW_CHANGE_START = 'ViewChangeStart'
VIEW_CHANGE_CONTINUE = 'ViewChangeContinue'
BLACKLIST = "BLACKLIST"
# Log-message prefixes used to group related log lines per subsystem.
THREE_PC_PREFIX = "3PC: "
MONITORING_PREFIX = "MONITORING: "
VIEW_CHANGE_PREFIX = "VIEW CHANGE: "
CATCH_UP_PREFIX = "CATCH-UP: "
PRIMARY_SELECTION_PREFIX = "PRIMARY SELECTION: "
BLS_PREFIX = "BLS: "
OBSERVER_PREFIX = "OBSERVER: "
PROPOSED_VIEW_NO = "proposed_view_no"
# Field names used inside message payloads and transactions.
NAME = "name"
VERSION = "version"
IP = "ip"
PORT = "port"
KEYS = "keys"
TYPE = "type"
TXN_TYPE = "type"
OP_VER = "ver"
TXN_ID = "txnId"
ORIGIN = "origin"
# Use f.IDENTIFIER.nm
IDENTIFIER = "identifier"
TARGET_NYM = "dest"
DATA = "data"
RAW = "raw"
ENC = "enc"
HASH = "hash"
ALIAS = "alias"
PUBKEY = "pubkey"
VERKEY = "verkey"
BLS_KEY = "blskey"
BLS_KEY_PROOF = "blskey_pop"
NYM_KEY = "NYM"
NODE_IP = "node_ip"
NODE_PORT = "node_port"
CLIENT_IP = "client_ip"
CLIENT_PORT = "client_port"
# CHANGE_HA = "CHANGE_HA"
# CHANGE_KEYS = "CHANGE_KEYS"
SERVICES = "services"
VALIDATOR = "VALIDATOR"
CLIENT = "CLIENT"
ROLE = 'role'
NONCE = 'nonce'
ATTRIBUTES = 'attributes'
VERIFIABLE_ATTRIBUTES = 'verifiableAttributes'
PREDICATES = 'predicates'
TXN_TIME = 'txnTime'
TXN_DATA = "txnData"
LAST_TXN = "lastTxn"
TXNS = "Txns"
BY = "by"
FORCE = 'force'
AML_VERSION = 'version'
AML = 'aml'
AML_CONTEXT = 'amlContext'
# Field names of audit-ledger transactions.
AUDIT_TXN_VIEW_NO = "viewNo"
AUDIT_TXN_PP_SEQ_NO = "ppSeqNo"
AUDIT_TXN_LEDGERS_SIZE = "ledgerSize"
AUDIT_TXN_LEDGER_ROOT = "ledgerRoot"
AUDIT_TXN_STATE_ROOT = "stateRoot"
AUDIT_TXN_PRIMARIES = "primaries"
AUDIT_TXN_DIGEST = "digest"
AUDIT_TXN_NODE_REG = "nodeReg"
# State proof fields
STATE_PROOF = 'state_proof'
ROOT_HASH = "root_hash"
MULTI_SIGNATURE = "multi_signature"
PROOF_NODES = "proof_nodes"
VALUE = 'value'
MULTI_SIGNATURE_SIGNATURE = 'signature'
MULTI_SIGNATURE_PARTICIPANTS = 'participants'
MULTI_SIGNATURE_VALUE = 'value'
MULTI_SIGNATURE_VALUE_LEDGER_ID = 'ledger_id'
MULTI_SIGNATURE_VALUE_STATE_ROOT = 'state_root_hash'
MULTI_SIGNATURE_VALUE_TXN_ROOT = 'txn_root_hash'
MULTI_SIGNATURE_VALUE_POOL_STATE_ROOT = 'pool_state_root_hash'
MULTI_SIGNATURE_VALUE_TIMESTAMP = 'timestamp'
# ROLES
IDENTITY_OWNER = Roles.IDENTITY_OWNER.value
STEWARD = Roles.STEWARD.value
TRUSTEE = Roles.TRUSTEE.value
# NOTE(review): IDENTITY_OWNER_STRING is deliberately None here (an identity
# owner has no role string) — confirm against consumers before changing.
IDENTITY_OWNER_STRING = None
STEWARD_STRING = 'STEWARD'
TRUSTEE_STRING = 'TRUSTEE'
# TXNs
NODE = PlenumTransactions.NODE.value
NYM = PlenumTransactions.NYM.value
AUDIT = PlenumTransactions.AUDIT.value
GET_TXN = PlenumTransactions.GET_TXN.value
TXN_AUTHOR_AGREEMENT = PlenumTransactions.TXN_AUTHOR_AGREEMENT.value
TXN_AUTHOR_AGREEMENT_AML = PlenumTransactions.TXN_AUTHOR_AGREEMENT_AML.value
TXN_AUTHOR_AGREEMENT_DISABLE = PlenumTransactions.TXN_AUTHOR_AGREEMENT_DISABLE.value
GET_TXN_AUTHOR_AGREEMENT = PlenumTransactions.GET_TXN_AUTHOR_AGREEMENT.value
GET_TXN_AUTHOR_AGREEMENT_AML = PlenumTransactions.GET_TXN_AUTHOR_AGREEMENT_AML.value
LEDGERS_FREEZE = PlenumTransactions.LEDGERS_FREEZE.value
GET_FROZEN_LEDGERS = PlenumTransactions.GET_FROZEN_LEDGERS.value
# Per-transaction-type payload versions; every type defaults to "1".
CURRENT_TXN_PAYLOAD_VERSIONS = defaultdict(lambda: "1")
CURRENT_TXN_PAYLOAD_VERSIONS[TXN_AUTHOR_AGREEMENT] = "2"
CURRENT_TXN_VERSION = "1"
# TXN
# TODO: many of these constants will be replaced
# by constants from Request after Request refactoring
TXN_PAYLOAD = "txn"
TXN_PAYLOAD_TYPE = "type"
TXN_PAYLOAD_PROTOCOL_VERSION = "protocolVersion"
TXN_PAYLOAD_DATA = "data"
TXN_PAYLOAD_VERSION = "ver"
TXN_PAYLOAD_METADATA = "metadata"
TXN_PAYLOAD_METADATA_FROM = "from"
TXN_PAYLOAD_METADATA_ENDORSER = "endorser"
TXN_PAYLOAD_METADATA_REQ_ID = "reqId"
TXN_PAYLOAD_METADATA_DIGEST = "digest"
TXN_PAYLOAD_METADATA_PAYLOAD_DIGEST = "payloadDigest"
TXN_PAYLOAD_METADATA_TAA_ACCEPTANCE = "taaAcceptance"
TXN_METADATA = "txnMetadata"
TXN_METADATA_TIME = "txnTime"
TXN_METADATA_ID = "txnId"
TXN_METADATA_SEQ_NO = "seqNo"
TXN_SIGNATURE = "reqSignature"
TXN_VERSION = "ver"
TXN_SIGNATURE_TYPE = "type"
ED25519 = "ED25519"
TXN_SIGNATURE_VALUES = "values"
TXN_SIGNATURE_FROM = "from"
TXN_SIGNATURE_VALUE = "value"
# Transaction-author-agreement (TAA) payload fields.
TXN_AUTHOR_AGREEMENT_TEXT = "text"
TXN_AUTHOR_AGREEMENT_VERSION = "version"
TXN_AUTHOR_AGREEMENT_DIGEST = "digest"
TXN_AUTHOR_AGREEMENT_RETIREMENT_TS = "retirement_ts"
TXN_AUTHOR_AGREEMENT_RATIFICATION_TS = "ratification_ts"
GET_TXN_AUTHOR_AGREEMENT_VERSION = "version"
GET_TXN_AUTHOR_AGREEMENT_DIGEST = "digest"
GET_TXN_AUTHOR_AGREEMENT_TIMESTAMP = "timestamp"
GET_TXN_AUTHOR_AGREEMENT_AML_VERSION = "version"
GET_TXN_AUTHOR_AGREEMENT_AML_TIMESTAMP = "timestamp"
LEDGERS_IDS = "ledgers_ids"
class ClientBootStrategy(IntEnum):
    """How a client bootstraps its connection to the pool of nodes."""
    Simple = 1
    PoolTxn = 2
    Custom = 3
class StorageType(IntEnum):
    """Backend used for primary transaction storage."""
    File = 1
    Ledger = 2
class KeyValueStorageType(IntEnum):
    """Supported key-value store implementations."""
    Leveldb = 1
    Memory = 2
    Rocksdb = 3
    ChunkedBinaryFile = 4
    BinaryFile = 5
class PreVCStrategies(IntEnum):
    """Strategies applied before starting a view change."""
    VC_START_MSG_STRATEGY = 1
@unique
class LedgerState(IntEnum):
    """Catch-up state of a single ledger."""
    not_synced = 1  # Still gathering consistency proofs
    syncing = 2  # Got sufficient consistency proofs, will be sending catchup
    # requests and waiting for their replies
    synced = 3  # Got replies for all catchup requests, indicating catchup
    # complete for the ledger
# Name of the field carrying the operation type in messages.
OP_FIELD_NAME = "op"
# Suffixes appended to a node's name to derive names for its sub-components.
CLIENT_STACK_SUFFIX = "C"
CLIENT_BLACKLISTER_SUFFIX = "BLC"
NODE_BLACKLISTER_SUFFIX = "BLN"
NODE_PRIMARY_STORAGE_SUFFIX = "PS"
NODE_TXN_STORE_SUFFIX = "TS"
NODE_HASH_STORE_SUFFIX = "HS"
# Hash-store backend identifiers.
HS_FILE = "file"
HS_MEMORY = "memory"
HS_LEVELDB = 'leveldb'
HS_ROCKSDB = 'rocksdb'
LAST_SENT_PRE_PREPARE = 'lastSentPrePrepare'
PLUGIN_BASE_DIR_PATH = "PluginBaseDirPath"
# Well-known ledger identifiers.
POOL_LEDGER_ID = 0
DOMAIN_LEDGER_ID = 1
CONFIG_LEDGER_ID = 2
AUDIT_LEDGER_ID = 3
# Store labels
BLS_LABEL = 'bls'
TS_LABEL = 'ts'
IDR_CACHE_LABEL = 'idr'
ATTRIB_LABEL = 'attrib'
SEQ_NO_DB_LABEL = 'seq_no_db'
NODE_STATUS_DB_LABEL = 'node_status_db'
LAST_SENT_PP_STORE_LABEL = 'last_sent_pp_store'
VALID_LEDGER_IDS = (POOL_LEDGER_ID, DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID, AUDIT_LEDGER_ID)
CURRENT_PROTOCOL_VERSION = PlenumProtocolVersion.TXN_FORMAT_1_0_SUPPORT.value
# Schema strictness flags: when False, unknown fields are tolerated.
OPERATION_SCHEMA_IS_STRICT = False
SCHEMA_IS_STRICT = False
GENERAL_LIMIT_SIZE = 256
| evernym/plenum | plenum/common/constants.py | Python | apache-2.0 | 7,426 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from buildbot.util import subscription
from buildbot.worker.protocols import base
class FakeTrivialConnection:
    """Minimal fake of a worker connection: only disconnect plumbing."""
    info = {}
    def __init__(self):
        self._disconnectSubs = subscription.SubscriptionPoint("disconnections from Fake")
    def waitShutdown(self):
        # Nothing to shut down; resolve immediately.
        return defer.succeed(None)
    def notifyOnDisconnect(self, cb):
        """Register *cb* to be called on disconnect; returns the subscription."""
        return self._disconnectSubs.subscribe(cb)
    def waitForNotifyDisconnectedDelivered(self):
        return self._disconnectSubs.waitForDeliveriesToFinish()
    def notifyDisconnected(self):
        # Fire all registered disconnect callbacks.
        self._disconnectSubs.deliver()
    def loseConnection(self):
        self.notifyDisconnected()
    def remoteSetBuilderList(self, builders):
        # This trivial fake accepts, but ignores, any builder list.
        return defer.succeed(None)
class FakeConnection(base.Connection):
    """Fake of a worker protocol connection for tests.

    Records every ``remote*`` invocation (method name plus arguments) in
    :attr:`remoteCalls` so tests can assert on the exact call sequence;
    all calls complete immediately with an already-fired Deferred.
    """
    def __init__(self, master, worker):
        super().__init__(master, worker)
        self._connected = True
        # Chronological log of (method-name, *args) tuples.
        self.remoteCalls = []
        self.builders = {}  # { name : isBusy }
        # users of the fake can add to this as desired
        self.info = {
            'worker_commands': [],
            'version': '0.9.0',
            'basedir': '/w',
            'system': 'nt',
        }
    def loseConnection(self):
        """Simulate the connection being dropped."""
        self.notifyDisconnected()
    def remotePrint(self, message):
        self.remoteCalls.append(('remotePrint', message))
        return defer.succeed(None)
    def remoteGetWorkerInfo(self):
        self.remoteCalls.append(('remoteGetWorkerInfo',))
        return defer.succeed(self.info)
    def remoteSetBuilderList(self, builders):
        # Log a copy so later mutation of the caller's list does not
        # change the recorded arguments.
        self.remoteCalls.append(('remoteSetBuilderList', builders[:]))
        # Idiomatic dict comprehension (was dict((b, False) for b in builders)).
        self.builders = {b: False for b in builders}
        return defer.succeed(None)
    def remoteStartCommand(self, remoteCommand, builderName, commandId, commandName, args):
        self.remoteCalls.append(('remoteStartCommand', remoteCommand, builderName,
                                 commandId, commandName, args))
        return defer.succeed(None)
    def remoteShutdown(self):
        self.remoteCalls.append(('remoteShutdown',))
        return defer.succeed(None)
    def remoteStartBuild(self, builderName):
        self.remoteCalls.append(('remoteStartBuild', builderName))
        return defer.succeed(None)
    def remoteInterruptCommand(self, builderName, commandId, why):
        self.remoteCalls.append(
            ('remoteInterruptCommand', builderName, commandId, why))
        return defer.succeed(None)
| anish/buildbot | master/buildbot/test/fake/fakeprotocol.py | Python | gpl-2.0 | 3,207 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
# Grid point: no transformation, polynomial trend, hourly seasonality, AR model.
testmod.build_model( ['None'] , ['PolyTrend'] , ['Seasonal_Hour'] , ['AR'] );
# Copyright (C) 2013-2017 Chris Lalancette <clalancette@gmail.com>
# Copyright (C) 2013 Ian McLeod <imcleod@redhat.com>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
RHEL-7 installation
"""
import os
import oz.ozutil
import oz.RedHat
import oz.OzException
class RHEL7Guest(oz.RedHat.RedHatLinuxCDYumGuest):
    """
    Class for RHEL-7 installation
    """
    def __init__(self, tdl, config, auto, output_disk=None, netdev=None,
                 diskbus=None, macaddress=None):
        # NOTE(review): the trailing positional arguments (True, True, "cpio",
        # macaddress, True) follow RedHatLinuxCDYumGuest's signature -- confirm
        # their meaning against oz.RedHat before changing.
        oz.RedHat.RedHatLinuxCDYumGuest.__init__(self, tdl, config, auto,
                                                 output_disk, netdev, diskbus,
                                                 True, True, "cpio", macaddress,
                                                 True)
        # Virtio channel on which Anaconda streams installer logs back to oz.
        self.virtio_channel_name = 'org.fedoraproject.anaconda.log.0'
    def _modify_iso(self):
        """
        Method to modify the ISO for autoinstallation.
        """
        # Drop the kickstart into the ISO root and point the kernel at it.
        self._copy_kickstart(os.path.join(self.iso_contents, "ks.cfg"))
        initrdline = " append initrd=initrd.img ks=cdrom:/dev/cdrom:/ks.cfg"
        if self.tdl.installtype == "url":
            initrdline += " repo=" + self.url + "\n"
        else:
            # RHEL6 dropped this command line directive due to an Anaconda bug
            # that has since been fixed. Note that this used to be "method="
            # but that has been deprecated for some time.
            initrdline += " repo=cdrom:/dev/cdrom"
        self._modify_isolinux(initrdline)
    def get_auto_path(self):
        """
        Method to create the correct path to the RHEL 7 kickstart file.
        """
        return oz.ozutil.generate_full_auto_path("RHEL7.auto")
def get_class(tdl, config, auto, output_disk=None, netdev=None, diskbus=None,
              macaddress=None):
    """
    Factory method for RHEL-7 installs.
    """
    # Only numeric update levels are recognized; for anything else this
    # factory implicitly returns None so other guest classes can claim the TDL.
    if tdl.update.isdigit():
        # Default to virtio devices unless the caller chose otherwise.
        if netdev is None:
            netdev = 'virtio'
        if diskbus is None:
            diskbus = 'virtio'
        return RHEL7Guest(tdl, config, auto, output_disk, netdev, diskbus,
                          macaddress)
def get_supported_string():
    """
    Return the supported distribution versions as a human-readable string.
    """
    supported = ("RHEL", "7")
    return " ".join(supported)
| imcleod/oz | oz/RHEL_7.py | Python | lgpl-2.1 | 2,885 |
import os
import matplotlib.pyplot as plt
import numpy as np
from plotting_styles import onecolumn_figure, default_figure
from paths import paper1_figures_path
'''
Make a UV plot of the 1000th HI channel.
'''
# Pre-computed UVW coordinates (in metres) for one channel of the track.
uvw = np.load("/mnt/MyRAID/M33/VLA/14B-088/HI/"
              "14B-088_HI_LSRK.ms.contsub_channel_1000.uvw.npy")
onecolumn_figure()
fig = plt.figure()
ax = fig.add_subplot(111) # , rasterized=True)
# plt.hexbin(uvw[0], uvw[1], bins='log', cmap='afmhot_r')
# Scatter each visibility sample; rasterize so the saved PDF stays small.
ax.scatter(uvw[0], uvw[1], s=0.1, color='k', rasterized=True)
plt.xlabel("U (m)")
plt.ylabel("V (m)")
plt.xlim([-3200, 3500])
plt.ylim([-3200, 3200])
plt.grid()
plt.tight_layout()
# Save both vector (pdf) and raster (png) versions of the figure.
plt.savefig(paper1_figures_path("m33_hi_uv_plane_chan1000.pdf"))
plt.savefig(paper1_figures_path("m33_hi_uv_plane_chan1000.png"))
plt.close()
default_figure()
| e-koch/VLA_Lband | 14B-088/HI/analysis/uv_plots/channel_1000_uvplot.py | Python | mit | 813 |
import tensorflow as tf
from layers import conv2d, linear, nnupsampling, batchnorm, pool
from activations import lrelu
import numpy as np
from utils import drawblock, createfolders
from scipy.misc import imsave
import os
# Create folders to store images
gen_dir, gen_dir128 = createfolders("./genimgs/CIFAR64GANAEsample", "/gen", "/gen64")
# Parameters
batch_size = 100
zdim = 100
n_classes = 10
gname = 'g_'
tf.set_random_seed(5555) # use different seed to generate different set of images
# Graph input: uniform latent noise and one-hot labels cycling through the
# classes so each batch contains an equal mix of all classes.
z = tf.random_uniform([batch_size, zdim], -1, 1)
# NOTE(review): batch_size / n_classes relies on Python 2 integer division.
iny = tf.constant(np.tile(np.eye(n_classes, dtype=np.float32), [batch_size / n_classes + 1, 1])[:batch_size, :])
# Generator
def generator(inp_z, inp_y, reuse=False):
    """Build the conditional generator network.

    inp_z is the latent noise batch and inp_y the one-hot class labels.
    Returns a pair of tanh-scaled image tensors: (32x32 pooled output,
    64x64 full-resolution output), both NCHW.
    """
    with tf.variable_scope('Generator', reuse=reuse):
        # Condition on the class label by concatenating it with the noise.
        inp = tf.concat([inp_z, inp_y], 1)
        g1 = linear(inp, 512 * 4 * 4, name=gname + 'deconv1')
        g1 = batchnorm(g1, is_training=tf.constant(True), name=gname + 'bn1g')
        g1 = lrelu(g1, 0.2)
        g1_reshaped = tf.reshape(g1, [-1, 512, 4, 4])
        print 'genreshape: ' + str(g1_reshaped.get_shape().as_list())
        # Progressively upsample 4x4 -> 8 -> 16 -> 32 -> 64 with conv blocks.
        g2 = nnupsampling(g1_reshaped, [8, 8])
        g2 = conv2d(g2, nout=256, kernel=3, name=gname + 'deconv2')
        g2 = batchnorm(g2, is_training=tf.constant(True), name=gname + 'bn2g')
        g2 = lrelu(g2, 0.2)
        g3 = nnupsampling(g2, [16, 16])
        g3 = conv2d(g3, nout=128, kernel=3, name=gname + 'deconv3')
        g3 = batchnorm(g3, is_training=tf.constant(True), name=gname + 'bn3g')
        g3 = lrelu(g3, 0.2)
        g3b = conv2d(g3, nout=128, kernel=3, name=gname + 'deconv3b')
        g3b = batchnorm(g3b, is_training=tf.constant(True), name=gname + 'bn3bg')
        g3b = lrelu(g3b, 0.2)
        g4 = nnupsampling(g3b, [32, 32])
        g4 = conv2d(g4, nout=64, kernel=3, name=gname + 'deconv4')
        g4 = batchnorm(g4, is_training=tf.constant(True), name=gname + 'bn4g')
        g4 = lrelu(g4, 0.2)
        g4b = conv2d(g4, nout=64, kernel=3, name=gname + 'deconv4b')
        g4b = batchnorm(g4b, is_training=tf.constant(True), name=gname + 'bn4bg')
        g4b = lrelu(g4b, 0.2)
        g5 = nnupsampling(g4b, [64, 64])
        g5 = conv2d(g5, nout=32, kernel=3, name=gname + 'deconv5')
        g5 = batchnorm(g5, is_training=tf.constant(True), name=gname + 'bn5g')
        g5 = lrelu(g5, 0.2)
        # Final 3-channel image in [-1, 1].
        g5b = conv2d(g5, nout=3, kernel=3, name=gname + 'deconv5b')
        g5b = tf.nn.tanh(g5b)
        # Average-pool the 64x64 output down to 32x32 for the second head.
        g5b_32 = pool(g5b, fsize=3, strides=2, op='avg', pad='SAME')
        return g5b_32, g5b
# Call functions
samples, samples128 = generator(z, iny)
# Initialize the variables
init = tf.global_variables_initializer()
# Config for session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Generate
with tf.Session(config=config) as sess:
    sess.run(init)
    saver = tf.train.Saver(max_to_keep=None)
    # Restore the pre-trained generator weights.
    saver.restore(sess=sess, save_path='./models/CIFAR64GANAE/cdgan50000.ckpt')
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # run generator
    gen_img, gen_img128 = sess.run([samples, samples128])
    # Store Generated: NCHW -> NHWC, rescale [-1, 1] -> [0, 255], and
    # reverse the channel order before tiling into one block image.
    genmix_imgs = (np.transpose(gen_img, [0, 2, 3, 1]) + 1.) * 127.5
    genmix_imgs = np.uint8(genmix_imgs[:, :, :, ::-1])
    genmix_imgs = drawblock(genmix_imgs, n_classes)
    imsave(os.path.join(gen_dir, 'sample1.jpg'), genmix_imgs)
    # Store Generated 64 (same post-processing for the 64x64 head).
    genmix_imgs = (np.transpose(gen_img128, [0, 2, 3, 1]) + 1.) * 127.5
    genmix_imgs = np.uint8(genmix_imgs[:, :, :, ::-1])
    genmix_imgs = drawblock(genmix_imgs, n_classes)
    imsave(os.path.join(gen_dir128, 'sample1.jpg'), genmix_imgs)
    coord.request_stop()
    coord.join(threads)
| cs-chan/ICIP2016-PC | ArtGAN/CIFAR64GANAEsample.py | Python | bsd-3-clause | 3,708 |
#!/home/kazimieras/Desktop/Hack/Backend/env/bin/python
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
# --------------------------------------------------------------------
# an image animation player
class UI(Label):
    """Tk label that plays an animation.

    Accepts either a list of images (frames) or a single multi-frame
    (sequence) image; each frame's ``duration`` info key, defaulting to
    100 ms, controls the playback speed.
    """
    def __init__(self, master, im):
        if isinstance(im, list):
            # list of images
            # NOTE(review): im[1:] drops the first list element here --
            # confirm the first frame is intentionally skipped.
            self.im = im[1:]
            im = self.im[0]
        else:
            # sequence
            self.im = im
        # Mode "1" (bilevel) images need a BitmapImage with an explicit color.
        if im.mode == "1":
            self.image = ImageTk.BitmapImage(im, foreground="white")
        else:
            self.image = ImageTk.PhotoImage(im)
        Label.__init__(self, master, image=self.image, bg="black", bd=0)
        self.update()
        try:
            duration = im.info["duration"]
        except KeyError:
            duration = 100
        # Schedule the next frame.
        self.after(duration, self.next)
    def next(self):
        """Advance to the next frame and reschedule; stop at the end."""
        if isinstance(self.im, list):
            try:
                im = self.im[0]
                del self.im[0]
                self.image.paste(im)
            except IndexError:
                return # end of list
        else:
            try:
                im = self.im
                im.seek(im.tell() + 1)
                self.image.paste(im)
            except EOFError:
                return # end of file
        try:
            duration = im.info["duration"]
        except KeyError:
            duration = 100
        self.after(duration, self.next)
        self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
    # Require at least one image file argument.
    if not sys.argv[1:]:
        print("Syntax: python player.py imagefile(s)")
        sys.exit(1)
    filename = sys.argv[1]
    root = Tk()
    root.title(filename)
    if len(sys.argv) > 2:
        # list of images: load every argument as one frame
        print("loading...")
        im = []
        for filename in sys.argv[1:]:
            im.append(Image.open(filename))
    else:
        # sequence: a single multi-frame file (e.g. GIF)
        im = Image.open(filename)
    UI(root, im).pack()
    root.mainloop()
| Glasgow2015/team-10 | env/bin/player.py | Python | apache-2.0 | 2,209 |
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
# Prepend the package's real source directories to sys.path so the
# devel-space shim can fall through to the actual sources.
__extended_path = "/home/rss-student/rss-2014-team-3/src/robotbrain/src".split(";")
for p in reversed(__extended_path):
    sys_path.insert(0, p)
    del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
# Locate the real package __init__ (either <dir>/<name>.py or
# <dir>/<name>/__init__.py) in each source directory.
__execfiles = []
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
    del p
del os_path
del __extended_path
# Execute the real __init__ file(s) in this module's namespace.
for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
    del __execfile
del __execfiles
| WeirdCoder/rss-2014-team-3 | devel/lib/python2.7/dist-packages/robotbrain/__init__.py | Python | mit | 1,010 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, unittest
class TestDynamicLinks(unittest.TestCase):
	"""Verify that documents referenced via (dynamic) link fields cascade or
	block deletion as expected."""
	def setUp(self):
		# Start each test with a clean Email Unsubscribe table.
		frappe.db.sql('delete from `tabEmail Unsubscribe`')
	def test_delete_normal(self):
		# Deleting a doc also deletes Email Unsubscribe rows referencing it.
		event = frappe.get_doc({
			'doctype': 'Event',
			'subject':'test-for-delete',
			'starts_on': '2014-01-01',
			'event_type': 'Public'
		}).insert()
		unsub = frappe.get_doc({
			'doctype': 'Email Unsubscribe',
			'email': 'test@example.com',
			'reference_doctype': event.doctype,
			'reference_name': event.name
		}).insert()
		event.delete()
		self.assertFalse(frappe.db.exists('Email Unsubscribe', unsub.name))
	def test_delete_with_comment(self):
		event = frappe.get_doc({
			'doctype': 'Event',
			'subject':'test-for-delete-1',
			'starts_on': '2014-01-01',
			'event_type': 'Public'
		}).insert()
		event.add_comment('Comment', 'test')
		# The comment is stored as a Communication linked to the event...
		self.assertTrue(frappe.get_all('Communication',
			filters={'reference_doctype':'Event', 'reference_name':event.name}))
		event.delete()
		# ...and is removed together with the event.
		self.assertFalse(frappe.get_all('Communication',
			filters={'reference_doctype':'Event', 'reference_name':event.name}))
	def test_custom_fields(self):
		# A dynamic link through custom fields must block deletion of its target.
		from frappe.utils.testutils import add_custom_field, clear_custom_fields
		add_custom_field('Event', 'test_ref_doc', 'Link', 'DocType')
		add_custom_field('Event', 'test_ref_name', 'Dynamic Link', 'test_ref_doc')
		unsub = frappe.get_doc({
			'doctype': 'Email Unsubscribe',
			'email': 'test@example.com',
			'global_unsubscribe': 1
		}).insert()
		event = frappe.get_doc({
			'doctype': 'Event',
			'subject':'test-for-delete-2',
			'starts_on': '2014-01-01',
			'event_type': 'Public',
			'test_ref_doc': unsub.doctype,
			'test_ref_name': unsub.name
		}).insert()
		self.assertRaises(frappe.LinkExistsError, unsub.delete)
		# Clearing the link makes the target deletable again.
		event.test_ref_doc = None
		event.test_ref_name = None
		event.save()
		unsub.delete()
		clear_custom_fields('Event')
| elba7r/builder | frappe/tests/test_dynamic_links.py | Python | mit | 2,014 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .doppelganger_simulator import DPGANSimulator
| intel-analytics/BigDL | python/chronos/src/bigdl/chronos/simulator/__init__.py | Python | apache-2.0 | 638 |
"""
WSGI config for octo_nemesis project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
# Point Django at the project settings before the application is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "octo_nemesis.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable served by WSGI servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| monkeywidget/massive-octo-nemesis | octo_nemesis/octo_nemesis/wsgi.py | Python | gpl-2.0 | 399 |
# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
| kadhikari/navitia | source/jormungandr/jormungandr/parking_space_availability/car/__init__.py | Python | agpl-3.0 | 1,258 |
##############################################################################
#
# Copyright (C) 2021 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from werkzeug.exceptions import Unauthorized
from odoo import http
from odoo.http import request, Controller
from odoo.addons.cms_form.controllers.main import FormControllerMixin
_logger = logging.getLogger(__name__)
class ZoomRegistration(Controller, FormControllerMixin):
    """Public website controller for registering to a Zoom session."""
    @http.route(
        [
            "/zoom/<model('res.partner.zoom.session'):session>/register",
            "/zoom/register"
        ],
        type="http", auth="public", methods=["GET", "POST"], website=True,
        sitemap=False)
    def zoom_registration(self, session=None, **kwargs):
        """Render (GET) or process (POST) the zoom registration form.

        When no session is given in the URL, the next upcoming session is
        used (sessions started less than 15 minutes ago still count).
        """
        if session is None:
            # Allow to register in the current zoom session for 15 minutes after start
            start = datetime.now() - relativedelta(minutes=15)
            session = request.env["res.partner.zoom.session"].get_next_session(start)
        if not session.website_published:
            # Unpublished sessions must not be reachable through this route.
            raise Unauthorized()
        participant = request.env["res.partner.zoom.attendee"]
        if request.env.user and request.env.user != request.env.ref("base.public_user"):
            # Logged-in user: pre-fill the form with their partner and reuse
            # an existing registration when one exists.
            partner = request.env.user.partner_id
            kwargs["partner_id"] = partner.id
            participant = session.participant_ids.filtered(
                lambda p: p.partner_id == partner)
        kwargs["zoom_session_id"] = session.id
        form = self.get_form("res.partner.zoom.attendee", participant.id, **kwargs)
        form.form_process(**kwargs)
        return request.render(
            "partner_communication_switzerland.zoom_registration_template",
            {"session": session,
             "form": form,
             "main_object": participant}
        )
| eicher31/compassion-switzerland | partner_communication_switzerland/controllers/zoom_registration.py | Python | agpl-3.0 | 2,131 |
'''
t5_cli.py - this file is part of S3QL (http://s3ql.googlecode.com)
Copyright (C) 2008-2009 Nikolaus Rath <Nikolaus@rath.org>
This program can be distributed under the terms of the GNU GPLv3.
'''
from __future__ import division, print_function
import errno
import llfuse
import os.path
import s3ql.cli.ctrl
import s3ql.cli.lock
import s3ql.cli.remove
import sys
import t4_fuse
import unittest2 as unittest
class cliTests(t4_fuse.fuse_tests):
    """End-to-end tests for the s3qlctrl / s3qllock / s3qlrm CLI tools."""
    def runTest(self):
        # One mkfs/mount/umount cycle shared by all sub-tests.
        self.mkfs()
        self.mount()
        self.tst_lock_rm()
        self.tst_ctrl_flush()
        self.umount()
        self.fsck()
    def tst_ctrl_flush(self):
        # s3qlctrl flushcache must succeed on a mounted filesystem.
        try:
            s3ql.cli.ctrl.main(['flushcache', self.mnt_dir])
        except:
            sys.excepthook(*sys.exc_info())
            self.fail("s3qlctrl raised exception")
    def tst_lock_rm(self):
        # Extract tar
        tempdir = os.path.join(self.mnt_dir, 'lock_dir')
        filename = os.path.join(tempdir, 'myfile')
        os.mkdir(tempdir)
        with open(filename, 'w') as fh:
            fh.write('Hello, world')
        # copy
        try:
            s3ql.cli.lock.main([tempdir])
        except:
            sys.excepthook(*sys.exc_info())
            self.fail("s3qllock raised exception")
        # Try to delete: a locked tree must reject modification with EPERM.
        # NOTE: cm.exception[0] is Python 2 style access to the errno.
        with self.assertRaises(OSError) as cm:
            os.unlink(filename)
        self.assertEqual(cm.exception[0], errno.EPERM)
        # Try to write
        with self.assertRaises(IOError) as cm:
            open(filename, 'w+').write('Hello')
        self.assertEqual(cm.exception[0], errno.EPERM)
        # delete properly, via s3qlrm (the only way to remove a locked tree)
        try:
            s3ql.cli.remove.main([tempdir])
        except:
            sys.excepthook(*sys.exc_info())
            self.fail("s3qlrm raised exception")
        self.assertTrue('lock_dir' not in llfuse.listdir(self.mnt_dir))
# Somehow important according to pyunit documentation
def suite():
    """Return this module's tests as a TestSuite."""
    return unittest.makeSuite(cliTests)
# Allow calling from command line
if __name__ == "__main__":
    unittest.main()
| drewlu/ossql | tests/t5_cli.py | Python | gpl-3.0 | 2,085 |
from logs import sonarlog
import conf_domainsize
import conf_nodes
import placement_bestfit
import numpy as np
# Setup Sonar logging
logger = sonarlog.getLogger('placement')
class BestFitDemand(placement_bestfit.BestFit):
    """Best-fit placement variant scoring nodes by measured (95th
    percentile) CPU demand instead of pure reservations."""
    def sort(self, host_choice, _key):
        # Ascending sort so the smallest (tightest) fit metric comes first.
        return sorted(host_choice, key = _key)
    def test_nodes(self, new_domain, node_list):
        """Return the name of the best-fitting node for new_domain, or
        None if no node has enough CPU and memory headroom."""
        host_choice = []
        for node in node_list:
            # Get actual CPU measurements
            curr_cpu_demand = np.percentile(node.get_readings(), 95)
            # Memory demand is calculated by summing up all VM reservations
            mem_load = 0
            # Calculate the node utilization by accumulating all domain loads
            for dom in node.domains.values():
                spec = dom.domain_configuration.get_domain_spec()
                mem_load += spec.total_memory()
            # Calculate metric
            spec = conf_domainsize.get_domain_spec(new_domain.size)
            mem_delta = conf_nodes.NODE_MEM - (mem_load + spec.total_memory())
            # Estimated CPU demand assuming the new VM runs at its
            # 95th-percentile load
            vm_cpu_demand = conf_nodes.to_node_load(95, new_domain.size)
            cpu_delta = conf_nodes.UTIL - curr_cpu_demand - vm_cpu_demand
            # Calculate fit metric
            metric = cpu_delta * mem_delta
            # Server is not able to handle the domain
            if cpu_delta < 0 or mem_delta < 0:
                continue
            # Add metric to the choice list
            host_choice.append((node.name, metric))
        # Check if we found at least one host
        if not host_choice:
            return None
        # Sort host choice list
        host_choice = self.sort(host_choice, lambda x: x[1])
        # Pick the one with the lowest metric (best fit)
        return host_choice[0][0]
| jacksonicson/paper.IS2015 | control/Control/src/balancer/placement_bestfit_demand.py | Python | mit | 1,999 |
#!/usr/bin/python
# miscgapbinary.py v0.1 1/21/2012 Jeff Doak jeff.w.doak@gmail.com
import scipy as sp
from scipy.optimize import leastsq
import BinaryMixingModel as bmm
#from scipy.interpolate import UnivariateSpline
import sys
BOLTZCONST = 8.617e-2 #meV/K
class MiscGapBinary:
    """
    Class that calculates a pseudo-binary miscibility gap based on an
    analytical solution model of mixing.
    """
    def __init__(self,model):
        # Polynomial orders in composition (X) and temperature (T), plus the
        # fitted Redlich-Kister coefficients, taken from the mixing model.
        self.orderX = model.orderX
        self.orderT = model.orderT
        self.RK_coeff = model.fit_vec
    def system(self,x_vec,T):
        """
        System of equations defining two-phase equilibrium in a pseudo-binary
        alloy.

        x_vec holds the two phase compositions; returns the common-tangent
        residuals [equal derivatives, chord slope minus derivative], both
        zero at equilibrium.
        """
        f = [bmm.dfmin_dx(self.orderX,self.orderT,x_vec[0],T)
             - bmm.dfmin_dx(self.orderX,self.orderT,x_vec[1],T)]
        f.append((bmm.rk_poly(self.orderX,self.orderT,x_vec[1],T)
                  - bmm.rk_poly(self.orderX,self.orderT,x_vec[0],T))
                 /(x_vec[1]-x_vec[0])
                 - bmm.dfmin_dx(self.orderX,self.orderT,x_vec[0],T))
        return f
| jeffwdoak/free_energies | free_energies/miscgapbinary.py | Python | mit | 1,083 |
# -*- coding: utf-8 -*-
"""
This is an integration "unit" test.
"""
# from canaimagnulinux.web.theme.config import DEPENDENCIES
from canaimagnulinux.web.theme.config import PROJECTNAME
from canaimagnulinux.web.theme.testing import INTEGRATION_TESTING
from plone import api
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
import unittest
class InstallTestCase(unittest.TestCase):
    """ The class that tests the installation of a particular product. """
    layer = INTEGRATION_TESTING
    def setUp(self):
        self.portal = self.layer['portal']
        self.qi = api.portal.get_tool('portal_quickinstaller')
    def test_installed(self):
        """ Tests the default GenericSetup profile of this package. """
        qi = getattr(self.portal, 'portal_quickinstaller')
        self.assertTrue(qi.isProductInstalled(PROJECTNAME))
    # def test_dependencies_installed(self):
    #     """ This method test that dependencies products are installed of this package. """
    #     for p in DEPENDENCIES:
    #         self.assertTrue(self.qi.isProductInstalled(p),
    #                         '{0} not installed'.format(p))
class UninstallTestCase(unittest.TestCase):
    """ Tests the uninstall GenericSetup profile of this package. """
    layer = INTEGRATION_TESTING
    def setUp(self):
        self.portal = self.layer['portal']
        setRoles(self.portal, TEST_USER_ID, ['Manager'])
        self.qi = getattr(self.portal, 'portal_quickinstaller')
        # Uninstall in setUp so each test starts from the uninstalled state.
        self.qi.uninstallProducts(products=[PROJECTNAME])
    def test_uninstalled(self):
        """ Tests the uninstall GenericSetup profile of this package. """
        self.assertFalse(self.qi.isProductInstalled(PROJECTNAME))
| CanaimaGNULinux/canaimagnulinux.web.theme | canaimagnulinux/web/theme/tests/test_setup.py | Python | gpl-3.0 | 1,684 |
# -----------------------------------------------------------------------
# OpenXenManager
#
# Copyright (C) 2009 Alberto Gonzalez Rodriguez alberto@pesadilla.org
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------
import xmlrpclib, urllib
import asyncore, socket
import select
import gtk
from os import chdir
import platform
import sys, shutil
import datetime
from threading import Thread
from configobj import ConfigObj
import xml.dom.minidom
from operator import itemgetter
import pdb
import rrdinfo
import time
import gobject
from openxenmanager.messages import messages, messages_header
import xml.sax.saxutils as saxutils
class oxcSERVERstorage:
stg_ref = None
stg_uuid = None
    def fill_hw_hba(self, ref, list):
        """Probe host *ref* for Fibre Channel (lvmohba) LUNs and fill *list*.

        Rows are grouped under a bold vendor header.  Returns 0 on success,
        1 when no LUNs were found (an error dialog is shown in that case).
        """
        #<?xml version="1.0"?><methodCall><methodName>SR.probe</methodName><params><param><value><string>OpaqueRef:c9ea013c-cbce-0e85-6863-66d8e7b66ea7</string></value></param><param><value><string>OpaqueRef:5c0a69d1-7719-946b-7f3c-683a7058338d</string></value></param><param><value><struct /></value></param><param><value><string>lvmohba</string></value></param><param><value><struct /></value></param></params></methodCall>
        list.clear()
        res = self.connection.SR.probe(self.session_uuid, ref, {}, "lvmohba", {})
        # SR.probe reports the LUN listing inside the "error" description XML.
        if len(res['ErrorDescription']) > 2:
            result = res['ErrorDescription'][3]
            dom = xml.dom.minidom.parseString(result)
            nodes = dom.getElementsByTagName("BlockDevice")
            disks = {}
            for node in nodes:
                size = self.convert_bytes(node.getElementsByTagName("size")[0].childNodes[0].data.strip())
                serial = node.getElementsByTagName("serial")[0].childNodes[0].data.strip()
                scsiid = node.getElementsByTagName("SCSIid")[0].childNodes[0].data.strip()
                adapter = node.getElementsByTagName("adapter")[0].childNodes[0].data.strip()
                channel = node.getElementsByTagName("channel")[0].childNodes[0].data.strip()
                id = node.getElementsByTagName("id")[0].childNodes[0].data.strip()
                lun = node.getElementsByTagName("lun")[0].childNodes[0].data.strip()
                vendor = node.getElementsByTagName("vendor")[0].childNodes[0].data.strip()
                path = node.getElementsByTagName("path")[0].childNodes[0].data.strip()
                # Group LUNs by vendor for display.
                if vendor not in disks:
                    disks[vendor] = []
                disks[vendor].append([" %s  %s  %s  %s:%s:%s:%s" % (size, serial, scsiid, adapter, channel, id, lun),scsiid,path])
            for ref in disks.keys():
                list.append(["<b>" + ref + "</b>", False, "", ""])
                for lun in disks[ref]:
                    list.append([lun[0], True, lun[1], lun[2]])
            return 0
        else:
            self.wine.show_error_dlg("No LUNs were found. Please verify your hardware configuration")
            return 1
    def rescan_isos(self, ref):
        """Asynchronously rescan SR *ref* (e.g. to pick up newly added ISOs)."""
        res = self.connection.Async.SR.scan(self.session_uuid, ref)
        if "Value" in res:
            # Remember the async task so progress can be tracked for this SR.
            self.track_tasks[res['Value']] = ref
        else:
            print res
    def detach_storage(self, ref):
        """Detach SR *ref*: unplug and destroy each of its PBDs in turn."""
        for pbd in self.all_storage[ref]['PBDs']:
            res = self.connection.Async.PBD.unplug(self.session_uuid, pbd)
            if "Value" in res:
                self.track_tasks[res['Value']] = ref
            else:
                print res
            if "Value" in res:
                value = res["Value"]
                # Busy-wait until the unplug task finishes before destroying.
                task = self.connection.task.get_record(self.session_uuid, value)['Value']
                while task["status"] == "pending":
                    task = self.connection.task.get_record(self.session_uuid, value)['Value']
            res = self.connection.PBD.destroy(self.session_uuid, pbd)
            if "Value" in res:
                self.track_tasks[res['Value']] = ref
            else:
                print res
    def forget_storage(self, ref):
        """Forget SR *ref*: unplug/destroy its PBDs (when allowed) then forget it."""
        if self.all_storage[ref]['allowed_operations'].count("unplug"):
            for pbd in self.all_storage[ref]['PBDs']:
                res = self.connection.Async.PBD.unplug(self.session_uuid, pbd)
                if "Value" in res:
                    self.track_tasks[res['Value']] = ref
                    value = res["Value"]
                    # Busy-wait for the unplug to complete.
                    task = self.connection.task.get_record(self.session_uuid, value)['Value']
                    while task["status"] == "pending":
                        task = self.connection.task.get_record(self.session_uuid, value)['Value']
                    res = self.connection.Async.PBD.destroy(self.session_uuid, pbd)
                    if "Value" in res:
                        self.track_tasks[res['Value']] = ref
                        value = res["Value"]
                        # Busy-wait for the destroy to complete.
                        task = self.connection.task.get_record(self.session_uuid, value)['Value']
                        while task["status"] == "pending":
                            task = self.connection.task.get_record(self.session_uuid, value)['Value']
                    else:
                        print res
                else:
                    print res
        res = self.connection.Async.SR.forget(self.session_uuid, ref)
        if "Value" in res:
            self.track_tasks[res['Value']] = ref
        else:
            print res
    def delete_vdi(self, ref_vdi, ref_vm):
        """Destroy VDI *ref_vdi*, first destroying every VBD attached to it.

        Progress is tracked against *ref_vm*.
        """
        for ref_vbd in self.all_vdi[ref_vdi]['VBDs']:
            res = self.connection.VBD.destroy(self.session_uuid, ref_vbd)
            if "Value" in res:
                self.track_tasks[res['Value']] = ref_vm
            else:
                print res
        res = self.connection.VDI.destroy(self.session_uuid, ref_vdi)
        if "Value" in res:
            self.track_tasks[res['Value']] = ref_vm
        else:
            print res
def reattach_nfs_iso(self, sr, name, share, options):
# FIXME
ref = self.all_hosts.keys()[0]
pbd = {
"uuid" : "",
"host" : ref,
"SR" : sr,
"device_config" : {
"location" : share,
"options": options
},
"currentyle_attached" : False,
"other_config" : {}
}
self.connection.SR.set_name_label(self.session_uuid, sr, name)
self.connection.SR.set_name_description(self.session_uuid, sr, "NFS ISO Library [%s]" % (share))
res = self.connection.Async.PBD.create(self.session_uuid, pbd)
if "Value" in res:
self.track_tasks[res['Value']] = ref
value = res['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
result = saxutils.unescape(task['result']).replace("<value>","").replace("</value>","").replace(""", '"')
res = self.connection.Async.PBD.plug(self.session_uuid, result)
value = res['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
if task["status"] == "success":
return 0
else:
self.wine.show_error_dlg(str(task["error_info"]))
return 1
else:
print res
def create_nfs_iso(self, ref, name, share, options):
sr = {
"location" : share,
"options" : options
}
value = self.connection.SR.create(self.session_uuid, ref, sr, "0", name, "NFS ISO Library [%s]" % (share), "iso", "iso", True, {})
if "ErrorDescription" in value:
self.wine.show_error_dlg(value["ErrorDescription"][2])
return 1
else:
return 0
def reattach_cifs_iso(self, sr, name, share, options, user="", password=""):
ref = self.all_hosts.keys()[0]
pbd = {
"uuid" : "",
"host" : ref,
"SR" : sr,
"device_config" : {
"location" : share,
"type": "cifs",
"options": options
},
"currentyle_attached" : False,
"other_config" : {}
}
self.connection.SR.set_name_label(self.session_uuid, sr, name)
self.connection.SR.set_name_description(self.session_uuid, sr, "CIFS ISO Library [%s]" % (share))
res = self.connection.Async.PBD.create(self.session_uuid, pbd)
if "Value" in res:
self.track_tasks[res['Value']] = ref
value = res['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
result = saxutils.unescape(task['result']).replace("<value>","").replace("</value>","").replace(""", '"')
res = self.connection.Async.PBD.plug(self.session_uuid, result)
value = res['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
if task["status"] == "success":
return 0
else:
self.wine.show_error_dlg(str(task["error_info"]))
return 1
else:
print res
def create_cifs_iso(self, ref, name, share, options, user="", password=""):
sr = {
"location" : share,
"type" : "cifs",
"options" : options,
"username" : user,
"cifspassword" : password,
}
value = self.connection.SR.create(self.session_uuid, ref, sr, "0", name, "CIFS ISO Library [%s]" % (share), "iso", "iso", True, {})
if "ErrorDescription" in value:
self.wine.show_error_dlg(value["ErrorDescription"][2])
return 1
else:
return 0
    def create_nfs_vhd(self, ref, name, host, path, options, create=None):
        """Create an NFS VHD SR on host `ref` from server `host`:`path`.

        `create` is accepted but unused.
        """
        sr = {
            "serverpath" : path,
            "server" : host,
            "options" : options
        }
        res = self.connection.SR.create(self.session_uuid, ref, sr, str(0), name, "NF SR [%s:%s]" % (host, path), "nfs", "", True, {})
        if "Value" in res:
            self.track_tasks[res['Value']] = ref
        else:
            print res
    def create_aoe(self, ref, name, path, create=None):
        """Create (format) an LVM SR on the AoE device `path` on host `ref`.

        `create` is accepted but unused.
        """
        sr = {
            "device" : path,
        }
        res = self.connection.SR.create(self.session_uuid, ref, sr, str(0), name, "AoE SR [%s]" % (path), "lvm", "", True, {})
        if "Value" in res:
            self.track_tasks[res['Value']] = ref
        else:
            print res
    def reattach_aoe(self, ref, name, path, create, uuid):
        """Reintroduce a forgotten AoE LVM SR and plug a fresh PBD on host
        `ref`.

        `create` is accepted but unused.
        """
        # NOTE(review): this lookup result is immediately overwritten by
        # SR.introduce below; the call only exercises the server.
        sr = self.connection.SR.get_by_uuid(self.session_uuid, uuid)
        sr = self.connection.SR.introduce(self.session_uuid, uuid, name, "AOE SR [%s]" % (path), "lvm", "", True, {})['Value']
        pbd = {
                "uuid" : "",
                "host" : ref,
                "SR" : sr,
                "device_config" : {
                    "device" : path,
                },
                "currently_attached" : False,
                "other_config" : {}
        }
        # Create the PBD and plug it synchronously.
        ref = self.connection.PBD.create(self.session_uuid, pbd)['Value']
        self.connection.PBD.plug(self.session_uuid, ref)
    def reattach_nfs_vhd(self, ref, name, host, path, options, create, uuid):
        """Reintroduce a forgotten NFS VHD SR and plug a fresh PBD on host
        `ref`.

        `create` is accepted but unused.
        """
        # NOTE(review): this lookup result is immediately overwritten by
        # SR.introduce below; the call only exercises the server.
        sr = self.connection.SR.get_by_uuid(self.session_uuid, uuid)
        sr = self.connection.SR.introduce(self.session_uuid, uuid, name, "NFS SR [%s:%s]" % (host, path), "nfs", "", True, {})['Value']
        pbd = {
                "uuid" : "",
                "host" : ref,
                "SR" : sr,
                "device_config" : {
                    "serverpath" : path,
                    "server" : host,
                    "options": options
                },
                "currently_attached" : False,
                "other_config" : {}
        }
        # Create the PBD and plug it synchronously.
        ref = self.connection.PBD.create(self.session_uuid, pbd)['Value']
        self.connection.PBD.plug(self.session_uuid, ref)
    def format_hardware_hba(self, ref, uuid, name, path):
        """Create (format) an lvmohba SR on the LUN with SCSI id `uuid`.

        The resulting task is tracked against self.host_vm[ref][0]; on
        success the SR's auto-scan is disabled via other_config.
        """
        sr = {
            "SCSIid" : uuid,
        }
        res = self.connection.SR.create(self.session_uuid, ref, sr, "0", name, "Hardware HBA SR [%s]" % (path), "lvmohba", "", False, {})
        if "Value" in res:
            self.track_tasks[res['Value']] = self.host_vm[ref][0]
            print self.connection.SR.set_other_config(self.session_uuid, res['Value'], {"auto-scan": "false"})
        else:
            print res
def reattach_and_introduce_hardware_hba(self, ref, uuid, name, path):
res = self.connection.SR.introduce(self.session_uuid, self.stg_uuid, name, "Hardware HBA SR [%s]" % (path), "lvmohba", "", False, {})
pbd = {
"uuid" : "",
"host" : ref,
"SR" : res['Value'],
"device_config" : {
"SCSIid" : uuid,
},
"currentyle_attached" : False,
"other_config" : {}
}
res = self.connection.Async.PBD.create(self.session_uuid, pbd)
if "Value" in res:
self.track_tasks[res['Value']] = self.host_vm[ref][0]
value = res['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
result = saxutils.unescape(task['result']).replace("<value>","").replace("</value>","").replace(""", '"')
res = self.connection.Async.PBD.plug(self.session_uuid, result)
value = res['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
if task["status"] == "success":
return 0
else:
self.wine.show_error_dlg(str(task["error_info"]))
return 1
else:
print res
def reattach_hardware_hba(self, ref, uuid, name, path):
ref = self.all_hosts.keys()[0]
pbd = {
"uuid" : "",
"host" : ref,
"SR" : self.stg_ref,
"device_config" : {
"SCSIid" : uuid,
},
"currentyle_attached" : False,
"other_config" : {}
}
self.connection.SR.set_name_label(self.session_uuid, self.stg_ref, name)
self.connection.SR.set_name_description(self.session_uuid, self.stg_ref, "Hardware HBA SR [%s]" % (path))
res = self.connection.Async.PBD.create(self.session_uuid, pbd)
if "Value" in res:
self.track_tasks[res['Value']] = self.host_vm[ref][0]
value = res['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
result = saxutils.unescape(task['result']).replace("<value>","").replace("</value>","").replace(""", '"')
res = self.connection.Async.PBD.plug(self.session_uuid, result)
value = res['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
if task["status"] == "success":
return 0
else:
self.wine.show_error_dlg(str(task["error_info"]))
return 1
else:
print res
pass
"""
sr = {
"SCSIid" : uuid,
}
res = self.connection.SR.create(self.session_uuid, ref, sr, "0", name, "Hardware HBA SR [IBM - %s]" % (path), "lvmohba", "", False, {})
if "Value" in res:
self.track_tasks[res['Value']] = self.host_vm[ref][0]
print self.connection.SR.set_other_config(self.session_uuid, res['Value'], {"auto-scan": "false"})
else:
print res
"""
    def check_hardware_hba(self, ref, uuid, text):
        """Probe host `ref` for an lvmohba SR on SCSI id `uuid` and
        classify the result.

        Side effects: sets self.stg_uuid (and possibly self.stg_ref) for
        later reattach_* calls.

        Returns a 3-element list [code, label, host_label]:
          0 -- no SR found on the LUN
          1 -- SR exists and already has PBDs (attached elsewhere)
          2 -- known SR without PBDs (plain reattach possible)
          3 -- SR on the LUN but unknown to this pool (introduce needed)
        """
        result = self.connection.SR.probe(self.session_uuid, ref, {"SCSIid" : uuid }, "lvmohba", {})['Value']
        dom = xml.dom.minidom.parseString(result)
        nodes = dom.getElementsByTagName("UUID")
        if len(nodes):
            reattach = True
            self.stg_uuid = nodes[0].childNodes[0].data.strip()
            # Look for a matching SR record already known to the pool.
            for storage_ref in self.all_storage.keys():
                storage = self.all_storage[storage_ref]
                if storage["uuid"] == self.stg_uuid:
                    self.stg_ref = storage_ref
                    if len(storage['PBDs']):
                        reattach = False
            if reattach:
                # NOTE(review): if no storage matched, self.stg_ref may be
                # unset or stale from a previous call -- TODO confirm.
                if self.stg_ref:
                    return [2, self.all_storage[self.stg_ref]['name_label'], self.all_hosts[ref]['name_label']]
                else:
                    return [3, text, self.all_hosts[ref]['name_label']]
            else:
                return [1, self.all_storage[self.stg_ref]['name_label'], self.all_hosts[ref]['name_label']]
        else:
            return [0, None, None]
def check_iscsi(self, ref, name, host, port, scsiid, targetiqn, user, password):
sr = {
"port" : port,
"target" : host,
"SCSIid" : scsiid,
"targetIQN" : targetiqn
}
if user:
sr["chapuser"] = user
if password:
sr["chappassword"] = password
value = self.connection.Async.SR.probe(self.session_uuid, ref, sr, "lvmoiscsi", {})['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
result = saxutils.unescape(task['result']).replace("<value>","").replace("</value>","").replace(""", '"')
print result
dom = xml.dom.minidom.parseString(result)
nodes = dom.getElementsByTagName("UUID")
if len(nodes):
return nodes[0].childNodes[0].data.strip()
else:
return None
#ref = self.connection.SR.create(self.session_uuid, ref, sr, "0", name, "iSCSI SR [%s (%s)]" % (host, targetiqn), "lvmoiscsi", "", True, {})
#print ref
def create_iscsi(self, ref, name, host, port, scsiid, targetiqn, user, password):
sr = {
"port" : port,
"target" : host,
"SCSIid" : scsiid,
"targetIQN" : targetiqn
}
if user:
sr["chapuser"] = user
if password:
sr["chappassword"] = password
res = self.connection.Async.SR.create(self.session_uuid, ref, sr, "0", name, "iSCSI SR [%s (%s)]" % (host, targetiqn), "lvmoiscsi", "", True, {})
    def reattach_iscsi(self, ref, name, host, port, scsiid, targetiqn, user, password, lun):
        """Reintroduce a forgotten lvmoiscsi SR (by `lun` uuid) and
        create/plug a PBD for it on host `ref`.

        Optional CHAP credentials go into the PBD device config.
        """
        res = self.connection.SR.introduce(self.session_uuid, lun, name, "iSCSI SR [%s (%s)]" % (host, targetiqn), "lvmoiscsi", "", True, {})
        print res
        pbd = {
                "uuid" : "",
                "host" : ref,
                "SR" : res['Value'],
                "device_config" : {
                    "port" : port,
                    "target" : host,
                    "SCSIid" : scsiid,
                    "targetIQN" : targetiqn
                },
                "currently_attached" : False,
                "other_config" : {}
        }
        if user:
            pbd["device_config"]["chapuser"] = user
        if password:
            pbd["device_config"]["chappassword"] = password
        res = self.connection.PBD.create(self.session_uuid, pbd)
        print res
        print self.connection.Async.PBD.plug(self.session_uuid, res['Value'])
        # NOTE(review): the string below is dead code (an unused string
        # expression) kept from an earlier SR.create-based implementation.
        """
        sr = {
            "port" : port,
            "target" : host,
            "SCSIid" : scsiid,
            "targetIQN" : targetiqn
        }
        if user:
            sr["chapuser"] = user
        if password:
            sr["chappassword"] = password
        res = self.connection.Async.SR.create(self.session_uuid, ref, sr, "0", name, "iSCSI SR [%s (%s)]" % (host, targetiqn), "lvmoiscsi", "", True, {})
        """
def scan_aoe(self, ref, lista, path):
sr = {
"device" : path,
}
value = self.connection.Async.SR.probe(self.session_uuid, ref, sr, "lvm", {})['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
print task
if task['result'].count("<value>"):
result = saxutils.unescape(task['result']).replace("<value>","").replace("</value>","").replace(""", '"')
dom = xml.dom.minidom.parseString(result)
nodes = dom.getElementsByTagName("SRlist")
if len(nodes[0].childNodes):
for i in range(1,len(nodes[0].childNodes),2):
ref = nodes[0].childNodes[i].childNodes[1].childNodes[0].data.strip()
print ref
print self.search_storage_uuid(ref)
if self.search_storage_uuid(ref) == False:
lista.append([ref, ref])
if lista.__len__() > 0:
return 2
else:
return 1
else:
if len(task["error_info"]) > 2:
self.wine.show_error_dlg(task["error_info"][2])
else:
self.wine.show_error_dlg(task["error_info"][1])
self.connection.task.destroy(self.session_uuid, value)
return 0
def scan_nfs_vhd(self, ref, list, host, path, options):
sr = {
"serverpath" : path,
"server" : host,
"options" : options,
}
value = self.connection.Async.SR.probe(self.session_uuid, ref, sr, "nfs", {})['Value']
task = self.connection.task.get_record(self.session_uuid, value)['Value']
while task["status"] == "pending":
task = self.connection.task.get_record(self.session_uuid, value)['Value']
if task['result'].count("<value>"):
result = saxutils.unescape(task['result']).replace("<value>","").replace("</value>","").replace(""", '"')
dom = xml.dom.minidom.parseString(result)
nodes = dom.getElementsByTagName("SRlist")
if len(nodes[0].childNodes):
for i in range(1,len(nodes[0].childNodes),2):
ref = nodes[0].childNodes[i].childNodes[1].childNodes[0].data.strip()
if self.search_storage_uuid(ref) == False:
list.append([ref, ref])
if list.__len__() > 0:
return 2
else:
return 1
else:
self.wine.show_error_dlg(task["error_info"][2])
self.connection.task.destroy(self.session_uuid, value)
return 0
def search_storage_uuid(self, uuid):
"""
Function to search a storage with specify uuid, returns True if found
"""
for stg in self.all_storage.keys():
if self.all_storage[stg]["uuid"] == uuid:
return True
return False
    def fill_iscsi_target_iqn(self, ref, list, target, port, user=None, password=None):
        """Populate `list` with the iSCSI IQNs advertised by `target`.

        Deliberately calls SR.create with an incomplete device_config
        ("SHOULD NEVER BE CREATED"): the server fails the task and returns
        the discovered targets as XML inside error_info[3].

        NOTE(review): `list` is presumably a gtk ListStore (it supports
        .clear()/.append) -- TODO confirm; Python 2 lists have no .clear().
        Returns True when targets were parsed, False otherwise.
        """
        list.clear()
        sr = {
            "port" : port,
            "target": target,
        }
        # Optional CHAP credentials.
        if user:
            sr["chapuser"] = user
        if password:
            sr["chappassword"] = password
        value = self.connection.Async.SR.create(self.session_uuid, ref, sr, "0", "__gui__", "SHOULD NEVER BE CREATED","lvmoiscsi","user", True, {})['Value']
        # Busy-wait for the (expected-to-fail) task to finish.
        task = self.connection.task.get_record(self.session_uuid, value)['Value']
        while task["status"] == "pending":
            task = self.connection.task.get_record(self.session_uuid, value)['Value']
        if task["error_info"][3]:
            dom = xml.dom.minidom.parseString(task["error_info"][3])
            nodes = dom.getElementsByTagName("TGT")
            ix = 1
            # <TGT> children: [1]=Index, [3]=IPAddress, [5]=TargetIQN
            for i in range(0, len(nodes)):
                index = nodes[i].childNodes[1].childNodes[0].data.strip()
                ip = nodes[i].childNodes[3].childNodes[0].data.strip()
                target = nodes[i].childNodes[5].childNodes[0].data.strip()
                list.append([target, "%s (%s)" % (target, ip)])
            self.connection.task.destroy(self.session_uuid, value)
            return True
        else:
            self.wine.show_error_dlg(task["error_info"][2])
            self.connection.task.destroy(self.session_uuid, value)
            return False
    def fill_iscsi_target_lun(self, ref, list, target, targetiqn, port, user=None, password=None):
        """Populate `list` with the LUNs exposed by `target`/`targetiqn`.

        Same trick as fill_iscsi_target_iqn: an SR.create that is expected
        to fail; the LUN catalogue comes back as XML in error_info[3].

        NOTE(review): `list` is presumably a gtk ListStore -- TODO confirm.
        Returns True when LUNs were parsed, False otherwise.
        """
        list.clear()
        sr = {
            "port" : port,
            "target": target,
        }
        # chapuser
        # chappassword
        if user:
            sr["chapuser"] = user
        if password:
            sr["chappassword"] = password
        sr["targetIQN"] = targetiqn
        value = self.connection.Async.SR.create(self.session_uuid, ref, sr, "0", "__gui__", "SHOULD NEVER BE CREATED","lvmoiscsi","user", True, {})['Value']
        # Busy-wait for the (expected-to-fail) task to finish.
        task = self.connection.task.get_record(self.session_uuid, value)['Value']
        while task["status"] == "pending":
            task = self.connection.task.get_record(self.session_uuid, value)['Value']
        if task["error_info"][3]:
            dom = xml.dom.minidom.parseString(task["error_info"][3])
            nodes = dom.getElementsByTagName("LUN")
            for i in range(0, len(nodes)):
                vendor = nodes[i].getElementsByTagName("vendor")[0].childNodes[0].data.strip()
                #serial = nodes[i].getElementsByTagName("serial")[0].childNodes[0].data.strip()
                lunid = nodes[i].getElementsByTagName("LUNid")[0].childNodes[0].data.strip()
                size = nodes[i].getElementsByTagName("size")[0].childNodes[0].data.strip()
                scsiid = nodes[i].getElementsByTagName("SCSIid")[0].childNodes[0].data.strip()
                list.append([scsiid, "LUN %s: %s (%s)" % (lunid, self.convert_bytes(size), vendor)])
            self.connection.task.destroy(self.session_uuid, value)
            return True
        else:
            self.wine.show_error_dlg(task["error_info"][2])
            self.connection.task.destroy(self.session_uuid, value)
            return False
| alanfranz/openxenmanager | openxenmanager/core/oxcSERVER_storage.py | Python | gpl-2.0 | 28,069 |
# Copyright 2015 Santiago R Soler
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import numpy as np
from scipy import fftpack
from matplotlib.patches import Rectangle
from lib.basics import *
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
class WindowConstruction(object):
    """Interactive square-window selector over a gridded anomaly map.

    Matplotlib event-driven (Python 2): left-click places the window
    center (snapped to the nearest grid node), arrow keys move it, the
    mouse scroll or '+'/'-' resize it, and Enter or right-click computes
    and plots the radial power spectrum of the windowed anomaly on `ax2`.
    """
    ## Shape = (ny,nx)
    def __init__(self,x,y,shape,anomaly,ax,ax2,points2pad):
        # Grid coordinates, shape (ny, nx) and flattened anomaly values.
        self.x = x
        self.y = y
        self.shape = shape
        self.ax = ax
        self.ax2 = ax2
        # Target number of points per side after zero padding.
        self.points2pad = points2pad
        self.anomaly = anomaly
        # Grid spacing along x; the grid is assumed regular.
        self.dx = (max(x)-min(x))/(self.shape[1]-1)
        # Initial half width: 1/16 of the smaller grid dimension.
        self.half_width = (min(self.shape)/16)*self.dx
        self.x_center = None
        self.y_center = None
        self.rect = Rectangle((0,0), 1, 1,fc='None')
        self.x1 = None
        self.y1 = None
        self.x2 = None
        self.y2 = None
        self.shape_window = None
        # Marker showing the current window center.
        self.l, = self.ax.plot([self.x_center],[self.y_center],'o')
        self.ax.add_patch(self.rect)
        self.ax.figure.canvas.mpl_connect('button_press_event', self.on_press)
        self.ax.figure.canvas.mpl_connect('scroll_event', self.on_scroll)
        self.ax.figure.canvas.mpl_connect('key_press_event', self.on_key)
        print "\nINSTRUCTIONS:"
        print "Click to select the window center"
        print "Move the center with arrows or click again"
        print "Resize the window with the mouse scroll or with '+' and '-'"
        print "Press 'i' to show information about the window"
        print "Press Enter or Right Click to plot the spectrum of the current window\n"
    def on_press(self,event):
        # Left click: snap the center to the nearest grid node.
        # Right click: compute and plot the spectrum.
        if event.inaxes!=self.ax:
            return
        if event.button == 1:
            self.x_center = event.xdata
            self.y_center = event.ydata
            self.x_center, self.y_center = nearest_point(self.x_center,
                    self.y_center, self.x,self.y)
        elif event.button == 3:
            self.plot_spectrum()
        self.rectangle_construction()
    def on_scroll(self,event):
        # Grow/shrink the window by one grid step per scroll click.
        self.half_width += event.step*self.dx
        self.rectangle_construction()
    def on_key(self,event):
        # Arrow keys move the center one grid step; '+'/'-' resize;
        # Enter plots the spectrum; 'i' prints the window geometry.
        event_list = ["right","left","up","down"]
        if event.key in event_list:
            if event.key == "right":
                self.x_center += self.dx
            elif event.key == "left":
                self.x_center -= self.dx
            elif event.key == "up":
                self.y_center += self.dx
            elif event.key == "down":
                self.y_center -= self.dx
            self.rectangle_construction()
        if event.key == "enter":
            self.plot_spectrum()
        if event.key == "i":
            print "(x,y)=",(self.x_center,self.y_center),"Width:",self.half_width*2
        if event.key == "+" or event.key == "-":
            if event.key == "+":
                self.half_width += self.dx
            elif event.key == "-":
                self.half_width -= self.dx
            self.rectangle_construction()
    def rectangle_construction(self):
        # Recompute the window corners from center + half width and
        # refresh the rectangle patch and center marker.
        self.x1 = self.x_center - self.half_width
        self.x2 = self.x_center + self.half_width
        self.y1 = self.y_center - self.half_width
        self.y2 = self.y_center + self.half_width
        # NOTE(review): these are floats; downstream code (cut_regular)
        # recomputes an integer shape -- TODO confirm intended.
        self.shape_window = (2*self.half_width/self.dx+1,
                2*self.half_width/self.dx+1)
        self.rect.set_width(self.x2 - self.x1)
        self.rect.set_height(self.y2 - self.y1)
        self.rect.set_xy((self.x1, self.y1))
        self.l.set_xdata([self.x_center])
        self.l.set_ydata([self.y_center])
        self.ax.figure.canvas.draw()
    def plot_radial_spectrum(self,f,log_spectrum,log_errors):
        # Redraw ax2 with the log spectrum and its error bars.
        self.ax2.cla()
        self.ax2.errorbar(f,log_spectrum,yerr=log_errors,fmt='o')
        self.ax2.set_ylabel(r"$\ln(\Phi_{\Delta T})$")
        self.ax2.set_ylim(-max(abs(log_spectrum)),0.2)
        self.ax2.set_xlim(0,max(f))
        self.ax2.figure.canvas.draw()
    def plot_spectrum(self):
        # Cut the current window, zero-pad it up to points2pad points per
        # side, then compute and plot its radial spectrum.
        area = (self.x1, self.x2, self.y1, self.y2)
        x_cut, y_cut, [anomaly_cut], shape_cut = cut_regular(self.x,self.y,[self.anomaly],self.shape,area)
        n_padding = int((self.points2pad - self.shape_window[1])/2.0)
        if n_padding < 0: n_padding = 0
        x_pad, y_pad, anomaly_pad, shape_pad = padding(x_cut,y_cut,anomaly_cut,shape_cut,n_padding)
        f, spectrum, log_spectrum, errors, log_errors = radial_spectrum(x_pad,y_pad,anomaly_pad,shape_pad)
        self.plot_radial_spectrum(f,log_spectrum,log_errors)
def cut_regular(x, y, scalars, shape, area):
    """
    Return a subsection of a regular grid.

    Fixes: the old docstring wrongly claimed the subsection was a view
    ("not a copy") -- fancy/boolean indexing of numpy arrays returns
    copies. The selection is now a vectorized boolean mask instead of a
    Python-level index loop (same elements, same order, py2/py3-safe).

    Parameters:

    * x, y
        numpy arrays with the x and y coordinates of the data points.
    * scalars
        List of numpy arrays with the scalar values on the grid points.
    * shape: tuple(ny,nx)
        Shape of the original data points.
    * area
        ``(x1, x2, y1, y2)``: Borders of the subsection

    Returns:

    * ``[subx, suby, subscalars, shape_window]``
        Copies of the x/y coordinates and scalar values inside the area.
        shape_window: tuple(ny,nx) -- shape of the cut grid, assuming the
        area borders fall on grid nodes.
    """
    xmin, xmax, ymin, ymax = area
    # Normalize the borders so min <= max on both axes.
    if xmin > xmax:
        xmin, xmax = xmax, xmin
    if ymin > ymax:
        ymin, ymax = ymax, ymin
    if shape[0]*shape[1] != len(x):
        raise ValueError("The shape isn't the shape of the x,y grid.")
    if len(x) != len(y):
        raise ValueError("x and y must have the same length")
    # Node spacing along each axis of the regular grid.
    distance_x = abs(max(x)-min(x)) / (shape[1]-1)
    distance_y = abs(max(y)-min(y)) / (shape[0]-1)
    shape_cutted = (int(abs(ymax-ymin)/distance_y + 1),
                    int(abs(xmax-xmin)/distance_x + 1))
    # Boolean mask of the points falling inside the (closed) area.
    inside = (x >= xmin) & (x <= xmax) & (y >= ymin) & (y <= ymax)
    return [x[inside], y[inside], [s[inside] for s in scalars], shape_cutted]
def padding(x, y, z, shape, n):
    """Zero-pad a regular grid with `n` extra nodes on every side.

    Fixes: the y coordinates were built with ``shape[1]`` points and the
    x spacing ``dx`` -- wrong for non-square/anisotropic grids; they now
    use ``shape[0]`` and a proper ``dy``. Behavior is unchanged for the
    square, uniformly spaced grids used elsewhere in this module. Also
    drops a redundant reshape of the already-shaped padded array.

    Parameters:
    * x, y -- flattened coordinate arrays of the original grid.
    * z    -- flattened (or reshapable) scalar values, length shape[0]*shape[1].
    * shape -- (ny, nx) of the original grid.
    * n    -- number of zero nodes added on each side.

    Returns (x_pad, y_pad, z_pad, shape_pad): flattened padded
    coordinates, the padded 2-D array, and its shape tuple.
    """
    n = int(n)
    # Pad the scalar grid with zeros.
    z = np.reshape(z, shape)
    z_pad = np.pad(z, n, 'constant', constant_values=0)
    shape_pad = np.shape(z_pad)
    # Extend the coordinate axes by n nodes on each side.
    dx = abs(max(x)-min(x))/(shape[1]-1)
    dy = abs(max(y)-min(y))/(shape[0]-1)
    xx = np.linspace(min(x)-n*dx, max(x)+n*dx, shape[1]+2*n)
    yy = np.linspace(min(y)-n*dy, max(y)+n*dy, shape[0]+2*n)
    x_pad, y_pad = np.meshgrid(xx, yy)
    x_pad = np.reshape(x_pad, shape_pad[0]*shape_pad[1])
    y_pad = np.reshape(y_pad, shape_pad[0]*shape_pad[1])
    return x_pad, y_pad, z_pad, shape_pad
def radial_spectrum(x, y, anomaly, shape):
    """Radially averaged power spectrum of a gridded anomaly.

    Fix: the slice index ``len(f)/2`` relied on Python 2 integer
    division; ``//`` gives the same result on Python 2 and stays an int
    on Python 3.

    Returns (f, spectrum, log_spectrum, errors, log_errors) where the
    spectrum is normalized by its maximum and f holds the non-negative
    frequencies.
    """
    nodes_distance = (max(x)-min(x))/(shape[1]-1)
    anomaly_2d = np.reshape(anomaly, shape)
    # 2-D FFT with the zero frequency shifted to the grid center.
    anomaly_fft = fftpack.fftshift(fftpack.fft2(anomaly_2d))
    power_spectrum = abs(anomaly_fft)**2
    # NOTE(review): rings_construction/radial_profile_w_errors come from
    # lib.basics (star import); rings are built from shape[0], so a
    # square grid is presumably assumed -- TODO confirm.
    rings = rings_construction(shape[0])
    spectrum, errors = radial_profile_w_errors(power_spectrum, rings)
    # Normalize by the spectrum maximum before taking logs.
    spectrum_max = max(spectrum)
    spectrum = spectrum/spectrum_max
    errors = errors/spectrum_max
    log_spectrum = np.log(spectrum)
    # Error of ln(S) is dS/S by propagation.
    log_errors = abs(errors/spectrum)
    f = fftpack.fftfreq(shape[0], nodes_distance)
    # Keep only the non-negative half of the frequency axis.
    f = f[:len(f)//2+1]
    return f, spectrum, log_spectrum, errors, log_errors
class AreaSelection(object):
    """
    This object allows us to select a rectangular area from a plot by
    dragging with the mouse.

    Arguments:
        verbose_points (bool): print the corner coordinates as selected.
        verbose_size (bool): print the size of the selected area.

    Properties:
        x1 (float): x coord of the first point
        x2 (float): y coord of the first point
        y1 (float): x coord of the second point
        y2 (float): y coord of the second point

    Usage:
        import matplotlib.pyplot as plt
        # Plot (you must draw the rectangle)
        area = AreaSelection(True,True)
        plt.plot()
        x1 = area.x1
        y1 = area.y1
        x2 = area.x2
        y2 = area.y2
        print x1,y1,x2,y2
    """
    def __init__(self,verbose_points=False,verbose_size=True):
        self.verbose_points = verbose_points
        self.verbose_size = verbose_size
        # Attach to the current axes and register the drag handlers.
        self.ax = plt.gca()
        self.rect = Rectangle((0,0), 1, 1,fc='r',alpha=0.5)
        self.x1 = None
        self.y1 = None
        self.x2 = None
        self.y2 = None
        # True while the mouse button is held down.
        self.onpress = False
        self.ax.add_patch(self.rect)
        self.ax.figure.canvas.mpl_connect('button_press_event', self.on_press)
        self.ax.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
        self.ax.figure.canvas.mpl_connect('button_release_event', self.on_release)
    def on_press(self, event):
        # Start of the drag: record the first corner.
        if event.inaxes!=self.ax:
            return
        self.onpress = True
        self.x1 = event.xdata
        self.y1 = event.ydata
        if self.verbose_points==True:
            print '(x1,y1):',(self.x1,self.y1)
    def on_motion(self,event):
        # While dragging, live-update the rectangle to the cursor.
        if event.inaxes!=self.ax:
            return
        if self.onpress==True:
            self.x2 = event.xdata
            self.y2 = event.ydata
            self.rect.set_width(self.x2 - self.x1)
            self.rect.set_height(self.y2 - self.y1)
            self.rect.set_xy((self.x1, self.y1))
            self.ax.figure.canvas.draw()
    def on_release(self, event):
        # End of the drag: fix the second corner and report if verbose.
        if event.inaxes!=self.ax:
            return
        self.onpress = False
        self.x2 = event.xdata
        self.y2 = event.ydata
        self.rect.set_width(self.x2 - self.x1)
        self.rect.set_height(self.y2 - self.y1)
        self.rect.set_xy((self.x1, self.y1))
        self.ax.figure.canvas.draw()
        if self.verbose_points==True:
            print '(x2,y2):',(self.x2,self.y2)
        if self.verbose_size==True:
            print "Area Size: ",abs(self.x2-self.x1),"x", abs(self.y2-self.y1)
def curve_fitting(f_fit,log_spectrum_fit,log_errors_fit,f,log_spectrum,log_errors):
    """Fit the two-depth log-spectrum model and plot the result.

    Fits ln(Phi) = a - 2*k*zt + 2*ln(1 - exp(-k*(zb - zt))) to the
    selected (f_fit, log_spectrum_fit) points, weighting by
    log_errors_fit; the full (f, log_spectrum) curve is plotted for
    context. Returns (zt, zb, a, varianzas) in the fit's original units.
    """
    def log_spectrum_function(k,zt,zb,a):
        # Model: top depth zt, bottom depth zb, amplitude offset a.
        return a - 2*k*zt + 2*np.log(np.ones(len(k))- np.exp(-k*(zb-zt)))
    res = curve_fit(log_spectrum_function,f_fit,log_spectrum_fit,
            p0=[1000,2500,-1],sigma=log_errors_fit,maxfev=10000*(len(f_fit)+1))
    zt = res[0][0]
    zb = res[0][1]
    a = res[0][2]
    # NOTE(review): despite the name, these are standard deviations
    # (sqrt of the covariance diagonal), not variances.
    varianzas = np.sqrt(np.diag(res[1]))
    # Convert fitted depths to km (the 2*pi accounts for the wavenumber
    # convention of the frequency axis).
    zt_km = zt/1000.0/(2*np.pi)
    zb_km = zb/1000.0/(2*np.pi)
    varianza_zt = varianzas[0]/1000.0/(2*np.pi)
    varianza_zb = varianzas[1]/1000.0/(2*np.pi)
    # NOTE(review): scaling the amplitude uncertainty by the depth
    # conversion factor looks questionable (a is not a depth); the value
    # is unused below -- TODO confirm.
    varianza_a = varianzas[2]/1000.0/(2*np.pi)
    print "\nFitting Results [km]:"
    print "Zt=",zt_km,"+/-",varianza_zt
    print "Zb=",zb_km,"+/-",varianza_zb
    print "Relative error of Zb=",varianza_zb/zb_km, "\n"
    if varianza_zb/zb_km > 0.4:
        print "VERY HIGH ERROR!\n"
    # Plot the fitted curve (skipping k=0 where the model diverges), the
    # full spectrum, and the points used for the fit.
    f_curve = np.linspace(0,max(f_fit),100)
    plt.plot(f_curve[1:],log_spectrum_function(f_curve[1:],zt,zb,a))
    plt.plot(f,log_spectrum,'.')
    plt.errorbar(f_fit,log_spectrum_fit,yerr=log_errors_fit,fmt='o',color='r')
    plt.show()
    return zt,zb,a,varianzas
| santis19/tesina-fisica | Curie/lib/objects_functions.py | Python | gpl-2.0 | 11,999 |
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from . import ( # noqa analysis:ignore
survey_variables,
) | adrienpacifico/openfisca-france-data | openfisca_france_data/model/input_variables/__init__.py | Python | agpl-3.0 | 969 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-20010 Universidad Rey Juan Carlos, GSyC/LibreSoft
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author : Jose Antonio Santos Cadenas <jcaden __at__ gsyc __dot__ es>
# Author : Jose Gato Luis <jgato@libresoft.es>
from format.utils import getResponseFormat, generateResponse
from social.core import api
from utils import error
def group_create (request):
    """Create a group named by POST['groupname'] for the logged-in user.

    Responds with the new group id on success, or an error payload when
    the user is anonymous, the request is not a POST, parameters are
    missing, or the API call fails.
    """
    format = getResponseFormat (request)
    # Guard clauses: authentication, HTTP method, required parameter.
    if not request.user.is_authenticated():
        return error(format, "The user is not authenticated")
    if request.method != "POST":
        return error(format, 'Need a POST petition')
    if "groupname" not in request.POST:
        return error(format, 'Missing parameters')
    group = {'groupname': request.POST["groupname"]}
    correct, message = api.group.create_or_modify(group, modify=False)
    if not correct:
        return error(format, message)
    data = {'code' : '200',
            'id' : message,
            'description' : 'Group created correctly',}
    return generateResponse(format, data, "ok")
def group_list (request):
    """Return the list of groups visible to the authenticated user.

    Fix: narrowed the bare ``except:`` to ``except Exception:`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    format = getResponseFormat (request)
    if not request.user.is_authenticated():
        return error(format, "The user is not authenticated")
    try:
        groups = api.group.get_all(request.user.id)
        data = {'code' : '200',
                'groups' : groups}
        return generateResponse(format, data, "group/list")
    except Exception:
        return error(format, 'Some errors occurred')
def group_data (request, group):
    """Return the data of `group` for the authenticated user.

    Fixes: narrowed the bare ``except:`` to ``except Exception:`` (no
    longer swallows SystemExit/KeyboardInterrupt) and compares against
    None with ``is``.
    """
    format = getResponseFormat (request)
    if not request.user.is_authenticated():
        return error(format, "The user is not authenticated")
    try:
        group = api.group.get_data(group, request.user.id)
        if group is None:
            return error(format, 'Group doesn\'t exist')
        else:
            data = {"code" : "200",
                    "group" : group,
                    "request" : request}
            return generateResponse(format, data, "group/data")
    except Exception:
        # NOTE(review): any API failure is reported as a missing group,
        # matching the original behavior.
        return error(format, 'Group doesn\'t exist')
def group_elements(request, group):
    """Return the elements of `group` for the authenticated user.

    Fix: narrowed the bare ``except:`` to ``except Exception:`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    format = getResponseFormat (request)
    if not request.user.is_authenticated():
        return error(format, "The user is not authenticated")
    try:
        elements = api.group.get_group_elements(group, request.user)
        data = {"code": "200",
                "elements": elements}
        return generateResponse(format, data, "group/elements")
    except Exception:
        return error(format, "Unknown error")
def group_delete (request):
    """Delete the group identified by POST['groupid'].

    Responds with a success payload, or an error payload when the user
    is anonymous, the request is not a POST, parameters are missing, or
    the API call fails.
    """
    format = getResponseFormat (request)
    # Guard clauses: authentication, HTTP method, required parameter.
    if not request.user.is_authenticated():
        return error(format, "The user is not authenticated")
    if request.method != "POST":
        return error(format, 'Need a POST petition')
    if "groupid" not in request.POST:
        return error(format, 'Missing parameters')
    group = request.POST['groupid']
    correct, message = api.group.delete(group)
    if not correct:
        return error(format, message)
    data = {'code' : '200',
            'description' : 'Group deleted correctly',}
    return generateResponse(format, data, "ok")
def group_join (request):
    """Add POST['userid'] to the group POST['groupid'].

    Fix: narrowed the bare ``except:`` to ``except Exception:`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    format = getResponseFormat (request)
    if not request.user.is_authenticated():
        return error(format, "The user is not authenticated")
    if request.method == "POST":
        if ("groupid" in request.POST) and ("userid" in request.POST):
            try:
                group = request.POST["groupid"]
                user = request.POST["userid"]
                correct, message = api.group.join(group, user)
                if correct:
                    data = {'code' : '200',
                            'description' : 'Joined correctly',}
                    return generateResponse(format, data, "ok")
                else:
                    return error (format, message)
            except Exception:
                return error(format, 'Unknown error occurred')
        else:
            return error(format, 'Missing parameters')
    else:
        return error(format, 'Need a POST petition')
def group_join_delete (request):
    """Remove POST['userid'] from the group POST['groupid'].

    Fix: narrowed the bare ``except:`` to ``except Exception:`` so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    format = getResponseFormat (request)
    if not request.user.is_authenticated():
        return error(format, "The user is not authenticated")
    if request.method == "POST":
        if ("groupid" in request.POST) and ("userid" in request.POST):
            try:
                group = request.POST["groupid"]
                user = request.POST["userid"]
                correct, message = api.group.unjoin (group, user)
                if correct:
                    data = {'code' : '200',
                            'description' : 'Unjoined correctly',}
                    return generateResponse(format, data, "ok")
                else:
                    return error(format, message)
            except Exception:
                return error(format, 'Unknown error occurred')
        else:
            return error(format, 'Missing parameters')
    else:
        return error(format, 'Need a POST petition')
| kgblll/libresoft-gymkhana | social/rest/groups.py | Python | gpl-2.0 | 5,067 |
from wptserve.utils import isomorphic_decode
def main(request, response):
    """Serve an HTML page that references an appcache manifest.

    The ``id``, ``encoding`` and ``mode`` query parameters are forwarded to
    ``manifest.py``; when ``mode`` is NETWORK an extra iframe stores a
    query-encoding probe through ``stash.py``.
    """
    uid = request.GET[b'id']
    charset = request.GET[b'encoding']
    run_mode = request.GET[b'mode']
    decoded_id = isomorphic_decode(uid)
    if run_mode == b'NETWORK':
        iframe = u"<iframe src='stash.py?q=%%C3%%A5&id=%s&action=put'></iframe>" % decoded_id
    else:
        iframe = u""
    doc = u"""<!doctype html>
<html manifest="manifest.py?id=%s&encoding=%s&mode=%s">
%s
""" % (decoded_id, isomorphic_decode(charset), isomorphic_decode(run_mode), iframe)
    headers = [(b"Content-Type", b"text/html; charset=%s" % charset)]
    return headers, doc.encode(isomorphic_decode(charset))
| scheib/chromium | third_party/blink/web_tests/external/wpt/html/infrastructure/urls/resolving-urls/query-encoding/resources/page-using-manifest.py | Python | bsd-3-clause | 614 |
import networkx as nx
import itertools
import pprint
import matplotlib.pyplot as plt

fig = plt.figure()
fig.subplots_adjust(left=0.2, wspace=0.6)

# Build a small weighted graph; each edge weight lives under key 'w'.
G = nx.Graph()
G.add_edges_from([(1, 2, {'w': 6}),
                  (2, 3, {'w': 3}),
                  (3, 1, {'w': 4}),
                  (3, 4, {'w': 12}),
                  (4, 5, {'w': 13}),
                  (5, 3, {'w': 11}),
                  ])

# Detect triangles: any trio of nodes whose three node pairs are all edges.
triangles = []
for trio in itertools.combinations(G.nodes(), 3):
    pairs = [pair for pair in itertools.combinations(trio, 2)
             if G.get_edge_data(*pair)]
    if len(pairs) == 3:
        triangles.append(pairs)

pos = nx.spring_layout(G)
graph1 = fig.add_subplot(121)
# NOTE(review): nx.draw() renders onto the current axes and returns None, so
# passing its result to .plot() is effectively a no-op plot call; kept as-is
# to preserve the original rendering behavior.
graph1.plot(nx.draw(G,
                    pos=pos,
                    node_size=[G.degree(n) for n in G.nodes()],
                    width=[G.get_edge_data(*e)['w'] for e in G.edges()],
                    edge_color=[G.get_edge_data(*e)['w'] for e in G.edges()]))

# Prune each triangle by removing its lightest edge.
for t in triangles:
    # Map weight -> edge.  NOTE(review): if two edges of a triangle share a
    # weight, the earlier entry is silently overwritten (original behavior).
    weights = {}
    for pair in t:
        weights[G.get_edge_data(*pair)['w']] = pair
    if len(weights) != 1:
        # BUG FIX: the original did ``l = weights.keys(); l.sort()`` which
        # fails on Python 3, where dict views have no sort(); use sorted()
        # to get the same descending list of weights.
        ordered = sorted(weights, reverse=True)
        pprint.pprint(ordered)
        lightest = ordered.pop()  # smallest weight is last after reverse sort
        G.remove_edge(*weights[lightest])

graph2 = fig.add_subplot(122)
graph2.plot(nx.draw(G,
                    pos=pos,
                    node_size=[G.degree(n) for n in G.nodes()],
                    width=[G.get_edge_data(*e)['w'] for e in G.edges()],
                    edge_color=[G.get_edge_data(*e)['w'] for e in G.edges()]))
plt.show()
| CSB-IG/natk | ninnx/pruning/mi_triangles.py | Python | gpl-3.0 | 1,793 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import subprocess
import re
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
log = logging.getLogger(__name__)
class SparkSubmitHook(BaseHook):
    """
    This hook is a wrapper around the spark-submit binary to kick off a spark-submit job.
    It requires that the "spark-submit" binary is in the PATH or the spark_home to be
    supplied.
    :param conf: Arbitrary Spark configuration properties
    :type conf: dict
    :param conn_id: The connection id as configured in Airflow administration. When an
        invalid connection_id is supplied, it will default to yarn.
    :type conn_id: str
    :param files: Upload additional files to the container running the job, separated by a
        comma. For example hive-site.xml.
    :type files: str
    :param py_files: Additional python files used by the job, can be .zip, .egg or .py.
    :type py_files: str
    :param jars: Submit additional jars to upload and place them in executor classpath.
    :type jars: str
    :param java_class: the main class of the Java application
    :type java_class: str
    :param executor_cores: Number of cores per executor (Default: 2)
    :type executor_cores: int
    :param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
    :type executor_memory: str
    :param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
    :type driver_memory: str
    :param keytab: Full path to the file that contains the keytab
    :type keytab: str
    :param principal: The name of the kerberos principal used for keytab
    :type principal: str
    :param name: Name of the job (default airflow-spark)
    :type name: str
    :param num_executors: Number of executors to launch
    :type num_executors: int
    :param verbose: Whether to pass the verbose flag to spark-submit process for debugging
    :type verbose: bool
    """
    def __init__(self,
                 conf=None,
                 conn_id='spark_default',
                 files=None,
                 py_files=None,
                 jars=None,
                 java_class=None,
                 executor_cores=None,
                 executor_memory=None,
                 driver_memory=None,
                 keytab=None,
                 principal=None,
                 name='default-name',
                 num_executors=None,
                 verbose=False):
        self._conf = conf
        self._conn_id = conn_id
        self._files = files
        self._py_files = py_files
        self._jars = jars
        self._java_class = java_class
        self._executor_cores = executor_cores
        self._executor_memory = executor_memory
        self._driver_memory = driver_memory
        self._keytab = keytab
        self._principal = principal
        self._name = name
        self._num_executors = num_executors
        self._verbose = verbose
        # Populated by submit(): the spark-submit subprocess handle and, in
        # yarn cluster mode, the application id scraped from its logs.
        self._sp = None
        self._yarn_application_id = None
        (self._master, self._queue, self._deploy_mode, self._spark_home) = self._resolve_connection()
        self._is_yarn = 'yarn' in self._master

    def _resolve_connection(self):
        """Resolve master/queue/deploy-mode/spark-home from the connection.

        Falls back to the 'yarn' master when the connection id cannot be
        loaded.
        """
        # Build from connection master or default to yarn if not available
        master = 'yarn'
        queue = None
        deploy_mode = None
        spark_home = None
        try:
            # Master can be local, yarn, spark://HOST:PORT or mesos://HOST:PORT
            conn = self.get_connection(self._conn_id)
            if conn.port:
                master = "{}:{}".format(conn.host, conn.port)
            else:
                master = conn.host
            # Determine optional yarn queue from the extra field
            extra = conn.extra_dejson
            if 'queue' in extra:
                queue = extra['queue']
            if 'deploy-mode' in extra:
                deploy_mode = extra['deploy-mode']
            if 'spark-home' in extra:
                spark_home = extra['spark-home']
        except AirflowException:
            logging.debug(
                "Could not load connection string {}, defaulting to {}".format(
                    self._conn_id, master
                )
            )
        return master, queue, deploy_mode, spark_home

    def get_conn(self):
        # BaseHook interface; this hook has no persistent connection object.
        pass

    def _build_command(self, application):
        """
        Construct the spark-submit command to execute.
        :param application: command to append to the spark-submit command
        :type application: str
        :return: full command to be executed
        """
        # If the spark_home is passed then build the spark-submit executable path using
        # the spark_home; otherwise assume that spark-submit is present in the path to
        # the executing user
        if self._spark_home:
            connection_cmd = [os.path.join(self._spark_home, 'bin', 'spark-submit')]
        else:
            connection_cmd = ['spark-submit']
        # The url ot the spark master
        connection_cmd += ["--master", self._master]
        # Optional flags are appended only when configured.
        if self._conf:
            for key in self._conf:
                connection_cmd += ["--conf", "{}={}".format(key, str(self._conf[key]))]
        if self._files:
            connection_cmd += ["--files", self._files]
        if self._py_files:
            connection_cmd += ["--py-files", self._py_files]
        if self._jars:
            connection_cmd += ["--jars", self._jars]
        if self._num_executors:
            connection_cmd += ["--num-executors", str(self._num_executors)]
        if self._executor_cores:
            connection_cmd += ["--executor-cores", str(self._executor_cores)]
        if self._executor_memory:
            connection_cmd += ["--executor-memory", self._executor_memory]
        if self._driver_memory:
            connection_cmd += ["--driver-memory", self._driver_memory]
        if self._keytab:
            connection_cmd += ["--keytab", self._keytab]
        if self._principal:
            connection_cmd += ["--principal", self._principal]
        if self._name:
            connection_cmd += ["--name", self._name]
        if self._java_class:
            connection_cmd += ["--class", self._java_class]
        if self._verbose:
            connection_cmd += ["--verbose"]
        if self._queue:
            connection_cmd += ["--queue", self._queue]
        if self._deploy_mode:
            connection_cmd += ["--deploy-mode", self._deploy_mode]
        # The actual script to execute
        connection_cmd += [application]
        logging.debug("Spark-Submit cmd: {}".format(connection_cmd))
        return connection_cmd

    def submit(self, application="", **kwargs):
        """
        Remote Popen to execute the spark-submit job
        :param application: Submitted application, jar or py file
        :type application: str
        :param kwargs: extra arguments to Popen (see subprocess.Popen)
        """
        spark_submit_cmd = self._build_command(application)
        self._sp = subprocess.Popen(spark_submit_cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    **kwargs)
        # Using two iterators here to support 'real-time' logging
        # NOTE(review): stdout is drained fully before stderr; if spark-submit
        # fills the stderr pipe buffer first this can deadlock -- confirm
        # expected log volumes.
        sources = [self._sp.stdout, self._sp.stderr]
        for source in sources:
            self._process_log(iter(source.readline, b''))
        output, stderr = self._sp.communicate()
        if self._sp.returncode:
            raise AirflowException(
                "Cannot execute: {}. Error code is: {}. Output: {}, Stderr: {}".format(
                    spark_submit_cmd, self._sp.returncode, output, stderr
                )
            )

    def _process_log(self, itr):
        """
        Processes the log files and extracts useful information out of it
        :param itr: An iterator which iterates over the input of the subprocess
        """
        # Consume the iterator
        for line in itr:
            line = line.decode('utf-8').strip()
            # If we run yarn cluster mode, we want to extract the application id from
            # the logs so we can kill the application when we stop it unexpectedly
            if self._is_yarn and self._deploy_mode == 'cluster':
                match = re.search('(application[0-9_]+)', line)
                if match:
                    self._yarn_application_id = match.groups()[0]
            # Pass to logging
            logging.info(line)

    def on_kill(self):
        """Kill the spark-submit process and, when known, the YARN app."""
        if self._sp and self._sp.poll() is None:
            logging.info('Sending kill signal to spark-submit')
            # BUG FIX: was ``self.sp.kill()`` -- the attribute is ``_sp``,
            # so the original raised AttributeError instead of killing.
            self._sp.kill()
            if self._yarn_application_id:
                logging.info('Killing application on YARN')
                # BUG FIX: was an unqualified ``Popen`` (NameError; only the
                # subprocess module is imported) called with a single command
                # string, which fails without shell=True.  Use an argument
                # list with subprocess.Popen instead.
                yarn_kill = subprocess.Popen(
                    ["yarn", "application", "-kill", self._yarn_application_id],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                logging.info("YARN killed with return code: {0}".format(yarn_kill.wait()))
| stverhae/incubator-airflow | airflow/contrib/hooks/spark_submit_hook.py | Python | apache-2.0 | 9,654 |
import sys
def ask():
    """Read terms interactively; stop and return 0 on an empty line.

    Yields each non-empty line typed at the ``>`` prompt; the return value 0
    becomes the value of a delegating ``yield from`` expression.
    """
    while True:
        line = input('>')
        if not line:
            return 0
        yield line
def parse_args():
    """Yield the command-line arguments, skipping the program name."""
    for arg in sys.argv[1:]:
        yield arg
def fetch(producer):
    """Run *producer*, discard its first item, then delegate the rest.

    *producer* must be a callable returning a generator; the first value it
    yields is consumed here, and every later value (and its return value)
    is forwarded via ``yield from``.
    """
    stream = producer()
    next(stream)
    yield from stream
def main(args):
    """Accumulate a running total/average of terms from args or stdin.

    NOTE(review): this experimental script looks broken as written:
    ``fetch(producer())`` calls the producer here AND inside fetch(), which
    would try to call a generator object; ``term = yield from gen`` binds the
    generator's *return value* (only available once gen is exhausted), not
    each yielded item; and nothing ever iterates the generator that main()
    returns.  Left byte-identical pending confirmation of intent.
    """
    if args:
        # Command-line terms available: read them instead of prompting.
        producer = parse_args
    else:
        producer = ask
    total = 0
    count = 0
    # NOTE(review): fetch() itself calls producer(), so this double call
    # passes an already-created generator where a callable is expected.
    gen = fetch(producer())
    while True:
        # Delegates all of gen's yields to main's caller; term receives
        # gen's return value after exhaustion (None/0), not a yielded term.
        term = yield from gen
        term = float(term)
        total += term
        count += 1
        average = total / count
        print('total: {} average: {}'.format(total, average))
if __name__ == '__main__':
    # NOTE(review): main() contains ``yield from`` so this call only creates
    # a generator object; no terms are actually processed -- confirm.
    main(sys.argv[1:])
| YuxuanLing/trunk | trunk/code/study/python/Fluent-Python-example-code/attic/control/adder/yield_from_input.py | Python | gpl-3.0 | 744 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.training import learning_rate_decay_v2
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.exponential_decay"])
def exponential_decay(learning_rate,
                      global_step,
                      decay_steps,
                      decay_rate,
                      staircase=False,
                      name=None):
  """Applies exponential decay to the learning rate.

  Computes `learning_rate * decay_rate ^ (global_step / decay_steps)`.  When
  `staircase` is `True` the exponent uses integer division, giving a
  step-wise (staircase) schedule: the rate drops every `decay_steps` steps
  by a factor of `decay_rate`.

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Must be positive.
    decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
      The decay rate.
    staircase: Boolean. If `True`, decay the learning rate at discrete
      intervals.
    name: String. Optional name of the operation. Defaults to
      'ExponentialDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` with the decayed
    learning rate; under eager execution, a callable producing that value,
    so the rate can change across optimizer invocations.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  schedule = learning_rate_decay_v2.exponential_decay(
      learning_rate,
      global_step,
      decay_steps,
      decay_rate,
      staircase=staircase,
      name=name)
  # Eager mode keeps the v2 schedule as a callable; graph mode evaluates it
  # immediately so the caller receives a Tensor.
  if context.executing_eagerly():
    return schedule
  return schedule()
@tf_export(v1=["train.piecewise_constant"])
def piecewise_constant(x, boundaries, values, name=None):
  """Piecewise constant learning rate from boundaries and interval values.

  Yields `values[0]` while `x <= boundaries[0]`, `values[1]` while
  `boundaries[0] < x <= boundaries[1]`, ..., and `values[-1]` once
  `x > boundaries[-1]`.

  Args:
    x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
      `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
    boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
      increasing entries, all of the same type as `x`.
    values: A list of `Tensor`s or `float`s or `int`s giving the value for
      each interval; one more element than `boundaries`, all of one type.
    name: A string. Optional name of the operation. Defaults to
      'PiecewiseConstant'.

  Returns:
    A 0-D `Tensor` with the scheduled value; under eager execution, a
    callable producing that value, so the rate can change across optimizer
    invocations.

  Raises:
    ValueError: if the types of `x` and `boundaries` do not match, the
      types of `values` do not match, or the list lengths are inconsistent.
  """
  schedule = learning_rate_decay_v2.piecewise_constant(
      x, boundaries, values, name=name)
  # Eager mode keeps the v2 schedule as a callable; graph mode evaluates it
  # immediately so the caller receives a Tensor.
  if context.executing_eagerly():
    return schedule
  return schedule()
@tf_export(v1=["train.polynomial_decay"])
def polynomial_decay(learning_rate,
                     global_step,
                     decay_steps,
                     end_learning_rate=0.0001,
                     power=1.0,
                     cycle=False,
                     name=None):
  """Applies a polynomial decay to the learning rate.

  Interpolates from `learning_rate` down to `end_learning_rate` over
  `decay_steps` steps following

  ```python
  (learning_rate - end_learning_rate) *
      (1 - global_step / decay_steps) ** power + end_learning_rate
  ```

  with `global_step` clamped to `decay_steps`.  When `cycle` is `True`,
  `decay_steps` is instead scaled up to the first multiple exceeding
  `global_step`, restarting the decay rather than clamping it.

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation. Must not be negative.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Must be positive.
    end_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The minimal end learning rate.
    power: A scalar `float32` or `float64` `Tensor` or a Python number. The
      power of the polynomial. Defaults to linear, 1.0.
    cycle: A boolean, whether or not it should cycle beyond decay_steps.
    name: String. Optional name of the operation. Defaults to
      'PolynomialDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` with the decayed
    learning rate; under eager execution, a callable producing that value.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  schedule = learning_rate_decay_v2.polynomial_decay(
      learning_rate,
      global_step,
      decay_steps,
      end_learning_rate=end_learning_rate,
      power=power,
      cycle=cycle,
      name=name)
  # Eager mode keeps the v2 schedule as a callable; graph mode evaluates it
  # immediately so the caller receives a Tensor.
  if context.executing_eagerly():
    return schedule
  return schedule()
@tf_export(v1=["train.natural_exp_decay"])
def natural_exp_decay(learning_rate,
                      global_step,
                      decay_steps,
                      decay_rate,
                      staircase=False,
                      name=None):
  """Applies natural exponential decay to the initial learning rate.

  Computes `learning_rate * exp(-decay_rate * global_step / decay_steps)`;
  with `staircase=True` the quotient `global_step / decay_steps` is floored
  first, producing a discrete step-wise schedule.

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The initial learning rate.
    global_step: A Python number. Global step to use for the decay
      computation. Must not be negative.
    decay_steps: How often to apply decay.
    decay_rate: A Python number. The decay rate.
    staircase: Whether to apply decay in a discrete staircase, as opposed to
      continuous, fashion.
    name: String. Optional name of the operation. Defaults to
      'ExponentialTimeDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` with the decayed
    learning rate; under eager execution, a callable producing that value.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  schedule = learning_rate_decay_v2.natural_exp_decay(
      learning_rate, global_step, decay_steps, decay_rate, staircase=staircase,
      name=name)
  # Eager mode keeps the v2 schedule as a callable; graph mode evaluates it
  # immediately so the caller receives a Tensor.
  if context.executing_eagerly():
    return schedule
  return schedule()
@tf_export(v1=["train.inverse_time_decay"])
def inverse_time_decay(learning_rate,
                       global_step,
                       decay_steps,
                       decay_rate,
                       staircase=False,
                       name=None):
  """Applies inverse time decay to the initial learning rate.

  Computes `learning_rate / (1 + decay_rate * global_step / decay_steps)`;
  with `staircase=True` the quotient `global_step / decay_steps` is floored
  first, producing a discrete step-wise schedule.

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
      number. The initial learning rate.
    global_step: A Python number. Global step to use for the decay
      computation. Must not be negative.
    decay_steps: How often to apply decay.
    decay_rate: A Python number. The decay rate.
    staircase: Whether to apply decay in a discrete staircase, as opposed to
      continuous, fashion.
    name: String. Optional name of the operation. Defaults to
      'InverseTimeDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` with the decayed
    learning rate; under eager execution, a callable producing that value.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  schedule = learning_rate_decay_v2.inverse_time_decay(
      learning_rate,
      global_step,
      decay_steps,
      decay_rate,
      staircase=staircase,
      name=name)
  # Eager mode keeps the v2 schedule as a callable; graph mode evaluates it
  # immediately so the caller receives a Tensor.
  if context.executing_eagerly():
    return schedule
  return schedule()
@tf_export(v1=["train.cosine_decay"])
def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):
  """Applies cosine decay to the learning rate.

  See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
  with Warm Restarts. https://arxiv.org/abs/1608.03983

  With `global_step` clamped to `decay_steps`, the multiplier follows half a
  cosine wave from 1 down to `alpha`:

  ```python
  cosine = 0.5 * (1 + cos(pi * global_step / decay_steps))
  decayed_learning_rate = learning_rate * ((1 - alpha) * cosine + alpha)
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
      The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Number of steps to decay over.
    alpha: A scalar `float32` or `float64` Tensor or a Python number.
      Minimum learning rate value as a fraction of learning_rate.
    name: String. Optional name of the operation. Defaults to 'CosineDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` with the decayed
    learning rate; under eager execution, a callable producing that value.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  schedule = learning_rate_decay_v2.cosine_decay(
      learning_rate, global_step, decay_steps, alpha=alpha, name=name)
  # Eager mode keeps the v2 schedule as a callable; graph mode evaluates it
  # immediately so the caller receives a Tensor.
  if context.executing_eagerly():
    return schedule
  return schedule()
@tf_export(v1=["train.cosine_decay_restarts"])
def cosine_decay_restarts(learning_rate,
                          global_step,
                          first_decay_steps,
                          t_mul=2.0,
                          m_mul=1.0,
                          alpha=0.0,
                          name=None):
  """Applies cosine decay with restarts to the learning rate.

  See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
  with Warm Restarts. https://arxiv.org/abs/1608.03983

  The learning rate multiplier decays from 1 to `alpha` over
  `first_decay_steps` steps, after which a warm restart occurs.  Each
  subsequent restart period is `t_mul` times longer and starts from an
  initial rate `m_mul` times smaller than the previous one.

  Args:
    learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
      The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation.
    first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
      number. Number of steps to decay over.
    t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
      Used to derive the number of iterations in the i-th period.
    m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
      Used to derive the initial learning rate of the i-th period.
    alpha: A scalar `float32` or `float64` Tensor or a Python number.
      Minimum learning rate value as a fraction of the learning_rate.
    name: String. Optional name of the operation. Defaults to 'SGDRDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` with the decayed
    learning rate; under eager execution, a callable producing that value.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  schedule = learning_rate_decay_v2.cosine_decay_restarts(
      learning_rate,
      global_step,
      first_decay_steps,
      t_mul=t_mul,
      m_mul=m_mul,
      alpha=alpha,
      name=name)
  # Eager mode keeps the v2 schedule as a callable; graph mode evaluates it
  # immediately so the caller receives a Tensor.
  if context.executing_eagerly():
    return schedule
  return schedule()
@tf_export(v1=["train.linear_cosine_decay"])
def linear_cosine_decay(learning_rate,
                        global_step,
                        decay_steps,
                        num_periods=0.5,
                        alpha=0.0,
                        beta=0.001,
                        name=None):
  """Applies linear cosine decay to the learning rate.

  See [Bello et al., ICML2017] Neural Optimizer Search with RL.
  https://arxiv.org/abs/1709.07417
  For the warm-start behavior controlled by `num_periods`, see
  [Loshchilov & Hutter, ICLR2016] SGDR. https://arxiv.org/abs/1608.03983

  With `global_step` clamped to `decay_steps`:

  ```python
  linear_decay = (decay_steps - global_step) / decay_steps
  cosine_decay = 0.5 * (
      1 + cos(pi * 2 * num_periods * global_step / decay_steps))
  decayed_learning_rate = learning_rate * (
      (alpha + linear_decay) * cosine_decay + beta)
  ```

  Linear cosine decay is more aggressive than plain cosine decay, so larger
  initial learning rates can typically be used.

  Args:
    learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
      The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Number of steps to decay over.
    num_periods: Number of periods in the cosine part of the decay.
      See computation above.
    alpha: See computation above.
    beta: See computation above.
    name: String. Optional name of the operation. Defaults to
      'LinearCosineDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate` with the decayed
    learning rate; under eager execution, a callable producing that value.

  Raises:
    ValueError: if `global_step` is not supplied.
  """
  schedule = learning_rate_decay_v2.linear_cosine_decay(
      learning_rate,
      global_step,
      decay_steps,
      num_periods=num_periods,
      alpha=alpha,
      beta=beta,
      name=name)
  # Eager mode keeps the v2 schedule as a callable; graph mode evaluates it
  # immediately so the caller receives a Tensor.
  if context.executing_eagerly():
    return schedule
  return schedule()
@tf_export(v1=["train.noisy_linear_cosine_decay"])
def noisy_linear_cosine_decay(learning_rate,
                              global_step,
                              decay_steps,
                              initial_variance=1.0,
                              variance_decay=0.55,
                              num_periods=0.5,
                              alpha=0.0,
                              beta=0.001,
                              name=None):
    """Applies noisy linear cosine decay to the learning rate.
    See [Bello et al., ICML2017] Neural Optimizer Search with RL.
    https://arxiv.org/abs/1709.07417
    For the idea of warm starts here controlled by `num_periods`,
    see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
    with Warm Restarts. https://arxiv.org/abs/1608.03983
    Note that linear cosine decay is more aggressive than cosine decay and
    larger initial learning rates can typically be used.
    When training a model, it is often recommended to lower the learning rate as
    the training progresses. This function applies a noisy linear
    cosine decay function to a provided initial learning rate.
    It requires a `global_step` value to compute the decayed learning rate.
    You can just pass a TensorFlow variable that you increment at each
    training step.
    The function returns the decayed learning rate. It is computed as:
    ```python
    global_step = min(global_step, decay_steps)
    linear_decay = (decay_steps - global_step) / decay_steps
    cosine_decay = 0.5 * (
        1 + cos(pi * 2 * num_periods * global_step / decay_steps))
    decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
    decayed_learning_rate = learning_rate * decayed
    ```
    where eps_t is 0-centered gaussian noise with variance
    initial_variance / (1 + global_step) ** variance_decay
    Example usage:
    ```python
    decay_steps = 1000
    lr_decayed = noisy_linear_cosine_decay(
      learning_rate, global_step, decay_steps)
    ```
    Args:
      learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
        The initial learning rate.
      global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
        Global step to use for the decay computation.
      decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Number of steps to decay over.
      initial_variance: initial variance for the noise. See computation above.
      variance_decay: decay for the noise's variance. See computation above.
      num_periods: Number of periods in the cosine part of the decay.
        See computation above.
      alpha: See computation above.
      beta: See computation above.
      name: String.  Optional name of the operation.  Defaults to
        'NoisyLinearCosineDecay'.
    Returns:
      A scalar `Tensor` of the same type as `learning_rate`.  The decayed
      learning rate.
    Raises:
      ValueError: if `global_step` is not supplied.
    @compatibility(eager)
    When eager execution is enabled, this function returns a function which in
    turn returns the decayed learning rate Tensor. This can be useful for changing
    the learning rate value across different invocations of optimizer functions.
    @end_compatibility
    """
    # Delegate the schedule construction to the v2 implementation, which
    # returns a callable producing the decayed rate.
    decayed_lr = learning_rate_decay_v2.noisy_linear_cosine_decay(
        learning_rate, global_step,
        decay_steps,
        initial_variance=initial_variance,
        variance_decay=variance_decay,
        num_periods=num_periods,
        alpha=alpha,
        beta=beta,
        name=name)
    # In graph mode, call the schedule once here so callers receive a Tensor;
    # under eager execution the callable itself is returned (see docstring).
    if not context.executing_eagerly():
        decayed_lr = decayed_lr()
    return decayed_lr
| dongjoon-hyun/tensorflow | tensorflow/python/training/learning_rate_decay.py | Python | apache-2.0 | 27,425 |
# Copyright 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.db import exception as d_exc
from neutron import context
from neutron.db import models_v2
from neutron.plugins.vmware.dbexts import db as nsx_db
from neutron.plugins.vmware.dbexts import models
from neutron.tests.unit import testlib_api
class NsxDBTestCase(testlib_api.SqlTestCase):
    """Unit tests for the NSX neutron<->NSX port-mapping DB helpers."""

    def setUp(self):
        super(NsxDBTestCase, self).setUp()
        # Admin context provides the DB session used by all tests below.
        self.ctx = context.get_admin_context()

    def _setup_neutron_network_and_port(self, network_id, port_id):
        """Create a neutron network and a port on it as test fixtures."""
        with self.ctx.session.begin(subtransactions=True):
            self.ctx.session.add(models_v2.Network(id=network_id))
            port = models_v2.Port(id=port_id,
                                  network_id=network_id,
                                  mac_address='foo_mac_address',
                                  admin_state_up=True,
                                  status='ACTIVE',
                                  device_id='',
                                  device_owner='')
            self.ctx.session.add(port)

    def test_add_neutron_nsx_port_mapping_handle_duplicate_constraint(self):
        # Adding the exact same mapping twice must be tolerated (idempotent).
        neutron_net_id = 'foo_neutron_network_id'
        neutron_port_id = 'foo_neutron_port_id'
        nsx_port_id = 'foo_nsx_port_id'
        nsx_switch_id = 'foo_nsx_switch_id'
        self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id)
        nsx_db.add_neutron_nsx_port_mapping(
            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
        # Call the method twice to trigger a db duplicate constraint error
        nsx_db.add_neutron_nsx_port_mapping(
            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
        result = (self.ctx.session.query(models.NeutronNsxPortMapping).
                  filter_by(neutron_id=neutron_port_id).one())
        self.assertEqual(nsx_port_id, result.nsx_port_id)
        self.assertEqual(neutron_port_id, result.neutron_id)

    def test_add_neutron_nsx_port_mapping_raise_on_duplicate_constraint(self):
        # A second mapping for the same neutron port but a *different* NSX
        # port id must raise, rather than silently overwrite.
        neutron_net_id = 'foo_neutron_network_id'
        neutron_port_id = 'foo_neutron_port_id'
        nsx_port_id_1 = 'foo_nsx_port_id_1'
        nsx_port_id_2 = 'foo_nsx_port_id_2'
        nsx_switch_id = 'foo_nsx_switch_id'
        self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id)
        nsx_db.add_neutron_nsx_port_mapping(
            self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id_1)
        # Call the method twice to trigger a db duplicate constraint error,
        # this time with a different nsx port id!
        self.assertRaises(d_exc.DBDuplicateEntry,
                          nsx_db.add_neutron_nsx_port_mapping,
                          self.ctx.session, neutron_port_id,
                          nsx_switch_id, nsx_port_id_2)

    def test_add_neutron_nsx_port_mapping_raise_integrity_constraint(self):
        # No neutron port fixture is created here, so the FK constraint on
        # the mapping row must fail with a DBError.
        neutron_port_id = 'foo_neutron_port_id'
        nsx_port_id = 'foo_nsx_port_id'
        nsx_switch_id = 'foo_nsx_switch_id'
        self.assertRaises(d_exc.DBError,
                          nsx_db.add_neutron_nsx_port_mapping,
                          self.ctx.session, neutron_port_id,
                          nsx_switch_id, nsx_port_id)
| gkotton/vmware-nsx | vmware-nsx/neutron/tests/unit/vmware/db/test_nsx_db.py | Python | apache-2.0 | 3,796 |
# Copyright (C) 2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for bazaar control directories that do not support colocated branches.
Colocated branch support is optional, and when it is not supported the methods
and attributes colocated branch support added should fail in known ways.
"""
from bzrlib import (
errors,
tests,
)
from bzrlib.tests import (
per_controldir,
)
class TestNoColocatedSupport(per_controldir.TestCaseWithControlDir):
    """Verify colocated-branch APIs fail cleanly on non-supporting formats."""

    def make_bzrdir_with_repo(self):
        """Create a control dir with a repository, or skip if unsupported.

        :return: the created control directory.
        :raises tests.TestNotApplicable: when the format cannot be tested.
        """
        # a bzrdir can construct a branch and repository for itself.
        if not self.bzrdir_format.is_supported():
            # unsupported formats are not loopback testable
            # because the default open will not open them and
            # they may not be initializable.
            raise tests.TestNotApplicable('Control dir format not supported')
        # Ensure the test transport is set up; the returned transport object
        # itself is not needed (was previously bound to an unused local).
        self.get_transport()
        try:
            made_control = self.make_bzrdir('.', format=self.bzrdir_format)
        except errors.UninitializableFormat:
            raise tests.TestNotApplicable('Control dir format not initializable')
        # assertEqual is the non-deprecated spelling of assertEquals.
        self.assertEqual(made_control._format, self.bzrdir_format)
        # The repository is created for its side effect on the control dir;
        # the returned repository object was never used.
        made_control.create_repository()
        return made_control

    def test_destroy_colocated_branch(self):
        branch = self.make_branch('branch')
        # Colocated branches should not be supported *or*
        # destroy_branch should not be supported at all
        self.assertRaises(
            (errors.NoColocatedBranchSupport, errors.UnsupportedOperation),
            branch.bzrdir.destroy_branch, 'colo')

    def test_create_colo_branch(self):
        made_control = self.make_bzrdir_with_repo()
        self.assertRaises(errors.NoColocatedBranchSupport,
                          made_control.create_branch, "colo")

    def test_open_branch(self):
        made_control = self.make_bzrdir_with_repo()
        self.assertRaises(errors.NoColocatedBranchSupport,
                          made_control.open_branch, name="colo")

    def test_get_branch_reference(self):
        made_control = self.make_bzrdir_with_repo()
        self.assertRaises(errors.NoColocatedBranchSupport,
                          made_control.get_branch_reference, "colo")

    def test_set_branch_reference(self):
        referenced = self.make_branch('referenced')
        made_control = self.make_bzrdir_with_repo()
        self.assertRaises(errors.NoColocatedBranchSupport,
                          made_control.set_branch_reference, referenced, name="colo")

    def test_get_branches(self):
        made_control = self.make_bzrdir_with_repo()
        made_control.create_branch()
        # Only the default (unnamed) branch should exist.
        self.assertEqual(made_control.get_branches().keys(),
                         [""])
| stewartsmith/bzr | bzrlib/tests/per_controldir_colo/test_unsupported.py | Python | gpl-2.0 | 3,407 |
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.utilities import subDict, dictCombinations
import pylab
def plotVariations(datalist, titles, genFun, varyperplot=None, prePlotFun=None, postPlotFun=None,
                   _differentiator=0.0, **optionlists):
    """ A tool for quickly generating a lot of variations of a plot.

    Generates a number of figures from a list of data (and titles).
    For each data item it produces one or more figures, each with one or more
    plots, while varying all options in optionlists (trying all combinations).

    :arg genFun: function generating the curve to be plotted for each set of
        options; may return `ys`, an `(xs, ys)` pair, or None to skip.
    :key varyperplot: determines which options are varied within a figure.
    :key prePlotFun: called before the plots of a figure.
    :key postPlotFun: called after the plots of a figure (e.g. to realign axes).
    :key _differentiator: small offset added to successive curves' y-values so
        overlapping curves can be told apart.
    """
    odl = subDict(optionlists, varyperplot, False)
    fdl = subDict(optionlists, varyperplot, True)
    # title contains file and non-varying parameters
    titadd1 = ''.join([k + '=' + str(vs[0])[:min(5, len(str(vs[0])))] + ' '
                       for k, vs in odl.items()
                       if len(vs) == 1])
    for x, tit in zip(datalist, titles):
        for figdict in sorted(dictCombinations(fdl.copy())):
            pylab.figure()
            # it also contains the parameters that don't vary per figure
            titadd2 = ''.join([k + '=' + str(v)[:min(5, len(str(v)))] + ' '
                               for k, v in figdict.items()])
            pylab.title(tit + '\n' + titadd1 + titadd2)
            # code initializing the plot
            if prePlotFun is not None:
                prePlotFun(x)
            # Guard: without this, an empty combination list would leave `lab`
            # unbound at the legend check below (NameError).
            lab = None
            for i, odict in enumerate(sorted(dictCombinations(odl.copy()))):
                # concise labels
                lab = ''.join([k[:3] + '=' + str(v)[:min(5, len(str(v)))] + '-'
                               for k, v in odict.items()
                               if len(odl[k]) > 1])
                if len(lab) > 0:
                    lab = lab[:-1]  # remove trailing '-'
                else:
                    lab = None
                generated = genFun(x, **dict(odict, **figdict))
                if generated is not None:
                    if len(generated) == 2:
                        xs, ys = generated
                    else:
                        ys = generated
                        xs = range(len(ys))
                    # the differentiator can slightly move the curves to be able
                    # to tell them apart if they overlap.
                    # BUG FIX: offset the y-values, not the raw `generated`
                    # object (which may be an (xs, ys) tuple and would raise).
                    if _differentiator != 0.0:
                        ys = ys + _differentiator * i
                    pylab.plot(xs, ys, label=lab)
            if postPlotFun is not None:
                postPlotFun(tit)
            # a legend is only necessary, if there are multiple plots
            if lab is not None:
                pylab.legend()
| hassaanm/stock-trading | src/pybrain/tools/plotting/quickvariations.py | Python | apache-2.0 | 2,907 |
try:
import simplejson as json
except:
import json
import os
import re
import requests
import ConfigParser
import StringIO
from githubcollective.team import Team
from githubcollective.repo import Repo, REPO_BOOL_OPTIONS, \
REPO_RESERVED_OPTIONS
from githubcollective.hook import Hook, HOOK_BOOL_OPTIONS
BASE_URL = 'https://api.github.com'
TEAM_PREFIX = '--auto-'
TEAM_OWNERS_SUFFIX = '-owners'
LOCAL_SECTION_PREFIXES = ('repo',)
class Config(object):
    """Team/repo configuration loaded from JSON.

    ``filename`` may be an open file object, a URL, a filesystem path, or the
    raw JSON data itself; the constructor dispatches on its type in that order.
    """

    def __init__(self, filename, verbose, pretend):
        self._teams = {}      # name -> Team
        self._repos = {}      # name -> Repo
        self._fork_urls = {}  # repo name -> upstream fork URL
        self.filename = filename
        self.verbose = verbose
        self.pretend = pretend
        # NOTE: Python 2 only -- `file` and `unicode` are py2 builtins.
        if type(filename) is file:
            data = filename.read()
        elif type(filename) in [str, unicode] and \
                self.is_url(filename):
            response = requests.get(filename)
            response.raise_for_status()
            data = response.text
        elif type(filename) in [str, unicode] and \
                os.path.exists(filename):
            f = open(filename)
            data = f.read()
            f.close()
        else:
            # Fall back to treating the argument as raw JSON data.
            data = filename
        if data:
            self._teams, self._repos, self._fork_urls = self.parse(data)

    def parse(self, data):
        """Parse JSON ``data`` into (teams, repos, fork_urls)."""
        teams, repos = {}, {}
        data = json.loads(data)
        for team in data['teams']:
            team = Team(**team)
            teams[team.name] = team
        for repo in data['repos']:
            if repo['hooks']:
                repo['hooks'] = [Hook(**hook) for hook in repo['hooks']]
            repo = Repo(**repo)
            repos[repo.name] = repo
        return teams, repos, data['fork_urls']

    def dumps(self, cache):
        """Serialize the current state as JSON into the ``cache`` file."""
        # Reopen for writing if the cache was opened read-only.
        if cache.mode != 'w+':
            cache = open(cache.name, 'w+')
        cache.truncate(0)
        cache.seek(0)
        json.dump({
            'teams': [self._teams[name].dumps() for name in self.teams],
            'repos': [self._repos[name].dumps() for name in self.repos],
            'fork_urls': self._fork_urls,
        }, cache, indent=4)

    def is_url(self, url):
        # NOTE(review): any string starting with "http" matches, not just
        # "http://" / "https://" -- confirm this looseness is intended.
        return url.startswith('http')

    @property
    def teams(self):
        """Set of configured team names."""
        return set(self._teams.keys())

    def get_team(self, name):
        """Return the Team called ``name``, or None."""
        return self._teams.get(name, None)

    def get_team_members(self, name):
        """Return the member set of team ``name`` (empty if unknown)."""
        members = []
        team = self.get_team(name)
        if team:
            members = team.members
        return set(members)

    def get_team_repos(self, name):
        """Return the repo set of team ``name`` (empty if unknown)."""
        repos = []
        team = self.get_team(name)
        if team:
            repos = team.repos
        return set(repos)

    @property
    def repos(self):
        """Set of configured repository names."""
        return set(self._repos.keys())

    def get_repo(self, name):
        """Return the Repo called ``name``, or None."""
        return self._repos.get(name, None)

    def get_fork_url(self, repo):
        """Return the fork URL configured for ``repo``, or None."""
        return self._fork_urls.get(repo, None)
_template_split = re.compile('([$]{[^}]*?})').split


def substitute(value, config, context=None, local=False, stack=()):
    """Expand ``${section:option}`` references inside *value*.

    `value`: string possibly containing substitution markers.
    `config`: object answering ``get(section, option)`` (a ConfigParser).
    `context`: section name used for empty-section (``${:option}``) lookups.
    `local`: when true, resolve every reference against ``context`` instead
        of the section named in the marker.
    `stack`: tuple of markers already being expanded (cycle detection).

    References whose section is one of ``LOCAL_SECTION_PREFIXES`` are left
    untouched so they can be resolved later against a concrete context.
    Modeled on the substitution scheme used by ``zc.buildout``.
    """
    pieces = _template_split(value)
    resolved = []
    # Odd-indexed pieces are the ${...} markers; even ones are literal text.
    for marker in pieces[1::2]:
        target = tuple(marker[2:-1].rsplit(':', 1))
        if len(target) < 2:
            raise ValueError("The substitution %s is missing a colon." % marker)
        section, option = target
        if local or not section:
            section = context
        if section in LOCAL_SECTION_PREFIXES:
            # Deferred: keep the marker verbatim for later local resolution.
            resolved.append(marker)
            continue
        replacement = config.get(section, option)
        if '${' in replacement:
            # Nested markers: recurse, guarding against reference cycles.
            if marker in stack:
                circle = ' --> '.join(stack + (marker,))
                raise ValueError(
                    "Circular reference in substitutions %s." % circle
                )
            replacement = substitute(value=replacement,
                                     config=config,
                                     context=section,
                                     stack=stack + (marker,))
        resolved.append(replacement)
    # Pad so the replacement list zips evenly with the literal pieces.
    resolved.append('')
    return ''.join([''.join(pair) for pair in zip(pieces[::2], resolved)])
def global_substitute(config):
    """Resolve every global ``${section:option}`` reference in *config*.

    Each option value containing a marker is expanded in place via
    :func:`substitute`. Local (context-dependent) references are deliberately
    left alone; they can only be resolved later against a concrete section.
    ``config`` is modified in place.
    """
    for section in config.sections():
        for option, value in config.items(section):
            if '${' not in value:
                continue
            expanded = substitute(value=value,
                                  config=config,
                                  context=section,
                                  local=False)
            config.set(section, option, expanded)
def load_config(data):
    """Parse raw configuration text into a ``SafeConfigParser`` instance."""
    parser = ConfigParser.SafeConfigParser()
    source = StringIO.StringIO(data)
    parser.readfp(source)
    return parser
def output_config(config):
    """Render *config* back to a string, with tabs expanded to 4 spaces."""
    buf = StringIO.StringIO()
    config.write(buf)
    rendered = buf.getvalue()
    return rendered.expandtabs(4)
class ConfigCFG(Config):
    """Configuration parsed from INI-style (.cfg) data instead of JSON."""

    def parse(self, data):
        """Parse cfg ``data`` into (teams, repos, fork_urls).

        Recognized sections: ``[repo:name]``, ``[team:name]`` and
        ``[hook:name]`` (the latter referenced from a repo's ``hooks``
        option). Auto-managed team names get the ``TEAM_PREFIX`` marker.
        """
        teams, repos, fork_urls = {}, {}, {}
        config = load_config(data)
        # global substitutions in ${section:option} style
        global_substitute(config)
        if self.verbose:
            print 'RESOLVED CONFIGURATION:\n'
            print output_config(config)
        for section in config.sections():
            if section.startswith('repo:'):
                # add repo
                name = section[len('repo:'):]
                # load configuration for repo
                repo_config = dict(config.items(section))
                # remove reserved properties (handled separately below)
                for option in REPO_RESERVED_OPTIONS:
                    if option in repo_config:
                        del repo_config[option]
                # coerce boolean values
                for option in REPO_BOOL_OPTIONS:
                    if option in repo_config:
                        repo_config[option] = config.getboolean(section,
                                                                option)
                # load hooks for repo
                hooks = []
                if config.has_option(section, 'hooks'):
                    for hook in config.get(section, 'hooks').split():
                        hook_section = 'hook:%s' % hook
                        hook_config = {}
                        for config_key, config_value in config.items(hook_section):
                            # local variable substitution (resolved against
                            # this repo's section as context)
                            if '${' in config_value:
                                config_value = substitute(value=config_value,
                                                          config=config,
                                                          context=section,
                                                          local=True)
                            # coerce values into correct formats
                            if config_key == 'config':
                                config_value = config_value.replace('\n', '')
                            elif config_key == 'events':
                                config_value = config_value.split()
                            if config_key in HOOK_BOOL_OPTIONS:
                                config_value = config.getboolean(hook_section,
                                                                 config_key)
                            hook_config[config_key] = config_value
                        hooks.append(Hook(**hook_config))
                repos[name] = Repo(name=name, hooks=hooks, **repo_config)
                # add fork
                if config.has_option(section, 'fork'):
                    fork_urls[name] = config.get(section, 'fork')
                # add owners team (gets admin permission on this repo)
                if config.has_option(section, 'owners'):
                    team_name = TEAM_PREFIX + name + TEAM_OWNERS_SUFFIX
                    team_members = config.get(section, 'owners').split()
                    teams[team_name] = Team(team_name, 'admin',
                                            members=team_members, repos=[name])
            elif section.startswith('team:'):
                # add team
                name = TEAM_PREFIX + section[len('team:'):]
                permission = 'pull'
                if config.has_option(section, 'permission'):
                    permission = config.get(section, 'permission')
                members = []
                if config.has_option(section, 'members'):
                    members = config.get(section, 'members').split()
                team_repos = []
                if config.has_option(section, 'repos'):
                    team_repos = config.get(section, 'repos').split()
                teams[name] = Team(name, permission,
                                   members=members, repos=team_repos)
        # second pass: add repos to teams (defined with a repo: section's
        # `teams` option); all teams already exist at this point
        for section in config.sections():
            if section.startswith('repo:'):
                if config.has_option(section, 'teams'):
                    for team in config.get(section, 'teams').split():
                        teams[TEAM_PREFIX + team].repos.add(
                            section[len('repo:'):],
                        )
        return teams, repos, fork_urls
class ConfigGithub(Config):
    """Configuration mirroring the live state of a GitHub organization.

    Teams and repos are fetched lazily from the GitHub API (via ``github``)
    and may be primed from / written to a JSON ``cache`` file.
    """

    def __init__(self, github, cache, verbose=False, pretend=False):
        self.github = github
        # Lazily-populated mirror of the remote state.
        self._github = {'teams': {}, 'repos': {}}
        data = None
        if cache:
            data = cache.read()
        super(ConfigGithub, self).__init__(data, verbose, pretend)
        # Empty cache file: populate it from the live API state.
        if cache and not data:
            print 'CACHE DOES NOT EXISTS! CACHING...'
            self.dumps(cache)
            print 'CACHE WRITTEN TO %s!' % cache.name

    def _get_teams(self):
        """Fetch (once) and return managed teams, keyed by name.

        Only teams whose name carries ``TEAM_PREFIX`` are mirrored; member
        and repo lists are only fetched when the counts are non-zero.
        """
        if 'teams' not in self._github.keys() or \
           not self._github['teams']:
            self._github['teams'] = {}
            for item in self.github._gh_org_teams():
                if not item['name'].startswith(TEAM_PREFIX):
                    continue
                item.update(self.github._gh_team(item['id']))
                team = Team(**item)
                if team.members_count > 0:
                    team.members.update([i['login']
                        for i in self.github._gh_team_members(item['id'])])
                if team.repos_count > 0:
                    team.repos.update([i['name']
                        for i in self.github._gh_team_repos(item['id'])])
                self._github['teams'][team.name] = team
        return self._github['teams']

    def _set_teams(self, value):
        self._github['teams'] = value

    def _del_teams(self):
        del self._github['teams']

    _teams = property(_get_teams, _set_teams, _del_teams)

    def _get_repos(self):
        """Fetch (once) and return organization repos (with hooks), by name."""
        if 'repos' not in self._github.keys() or \
           not self._github['repos']:
            self._github['repos'] = {}
            for item in self.github._gh_org_repos():
                hooks = []
                for hook in self.github._gh_org_repo_hooks(item['name']):
                    hooks.append(Hook(**hook))
                repo = Repo(hooks=hooks, **item)
                self._github['repos'][repo.name] = repo
        return self._github['repos']

    def _set_repos(self, value):
        self._github['repos'] = value

    def _del_repos(self):
        del self._github['repos']

    _repos = property(_get_repos, _set_repos, _del_repos)
| collective/github-collective | githubcollective/config.py | Python | bsd-2-clause | 12,429 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import traceback
import random
try:
import pydot
except:
pydot = None
import hashlib
import OpenSSL.crypto
from calvin.utilities import calvinlogger
from calvin.runtime.south.plugins.storage.twistedimpl.securedht import append_server
from calvin.runtime.south.plugins.storage.twistedimpl.securedht import dht_server
from calvin.runtime.south.plugins.storage.twistedimpl.securedht import service_discovery_ssdp
from calvin.utilities import certificate
from kademlia.node import Node
from kademlia.utils import deferredDict, digest
from kademlia.crawling import NodeSpiderCrawl
from calvin.utilities import calvinconfig
_conf = calvinconfig.get()
_log = calvinlogger.get_logger(__name__)
def generate_challenge():
    """Return 8 bytes of OS randomness as a hex-formatted challenge string."""
    random_bytes = os.urandom(8)
    return random_bytes.encode("hex")
class evilAutoDHTServer(dht_server.AutoDHTServer):
    """AutoDHTServer variant that starts a misbehaving (evil) DHT node.

    Used by security tests to exercise the network's resilience against
    poisoning / insertion / eclipse / sybil attackers.
    """

    def __init__(self, *args, **kwargs):
        super(evilAutoDHTServer, self).__init__(*args, **kwargs)
        # Certificate configuration drives where runtime certs are read from.
        self.cert_conf = certificate.Config(_conf.get("security", "certificate_conf"),
                                            _conf.get("security", "certificate_domain")).configuration

    def start(self, iface='', network=None, bootstrap=None, cb=None, type=None, name=None):
        """Start the evil DHT server and SSDP discovery.

        Args:
            iface: interface to bind.
            network: DHT network filter; falls back to config / "ALL".
            bootstrap: list of (ip, port) pairs to bootstrap from.
            cb: optional callable invoked once bootstrapping succeeded.
            type: attack type ("poison", "insert", "eclipse" or "sybil").
            name: runtime name; selects the certificate directory.
        Returns:
            A Deferred fired once the DHT has started.
        """
        if bootstrap is None:
            bootstrap = []
        name_dir = os.path.join(self.cert_conf["CA_default"]["runtimes_dir"], name)
        filename = os.listdir(os.path.join(name_dir, "mine"))
        # Use a context manager so the cert file is always closed
        # (the original leaked the file handle).
        with open(os.path.join(name_dir, "mine", filename[0]), 'rt') as cert_file:
            st_cert = cert_file.read()
        cert_part = st_cert.split(certificate.BEGIN_LINE)
        certstr = "{}{}".format(certificate.BEGIN_LINE, cert_part[1])
        try:
            cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                                   certstr)
        except Exception:
            # FIX: the original called an undefined `logger(self.sourceNode, ...)`
            # here, which raised NameError and masked the real failure.
            _log.error("Certificate creating failed at startup")
            raise
        # The node id is derived from the certificate fingerprint.
        key = cert.digest("sha256")
        newkey = key.replace(":", "")
        bytekey = newkey.decode("hex")
        if network is None:
            network = _conf.get_in_order("dht_network_filter", "ALL")
        self.dht_server = dht_server.ServerApp(evilAppendServer, bytekey[-20:])
        ip, port = self.dht_server.start(iface=iface)
        dlist = []
        dlist.append(self.dht_server.bootstrap(bootstrap))
        self._ssdps = service_discovery_ssdp.SSDPServiceDiscovery(iface,
                                                                  cert=certstr)
        dlist += self._ssdps.start()
        _log.debug("Register service %s %s:%s" % (network, ip, port))
        self._ssdps.register_service(network, ip, port)
        _log.debug("Set client filter %s" % (network))
        self._ssdps.set_client_filter(network)
        start_cb = service_discovery_ssdp.defer.Deferred()

        def bootstrap_proxy(addrs):
            def started(args):
                _log.debug("DHT Started %s" % (args))
                if not self._started:
                    service_discovery_ssdp.reactor.callLater(.2,
                                                             start_cb.callback,
                                                             True)
                    if cb:
                        service_discovery_ssdp.reactor.callLater(.2,
                                                                 cb,
                                                                 True)
                    self._started = True

            def failed(args):
                _log.debug("DHT failed to bootstrap %s" % (args))
                # reactor.callLater(.5, bootstrap_proxy, addrs)

            _log.debug("Trying to bootstrap with %s" % (repr(addrs)))
            d = self.dht_server.bootstrap(addrs)
            d.addCallback(started)
            d.addErrback(failed)

        def start_msearch(args):
            _log.debug("** msearch %s args: %s" % (self, repr(args)))
            service_discovery_ssdp.reactor.callLater(0,
                                                     self._ssdps.start_search,
                                                     bootstrap_proxy,
                                                     stop=False)

        # Wait until servers all listen
        dl = service_discovery_ssdp.defer.DeferredList(dlist)
        dl.addBoth(start_msearch)
        # Configure the attack type and identity on the protocol.
        self.dht_server.kserver.protocol.evilType = type
        self.dht_server.kserver.protocol.sourceNode.port = port
        self.dht_server.kserver.protocol.sourceNode.ip = "0.0.0.0"
        self.dht_server.kserver.name = name
        self.dht_server.kserver.protocol.name = name
        self.dht_server.kserver.protocol.storeOwnCert(certstr)
        self.dht_server.kserver.protocol.setPrivateKey()
        return start_cb
class evilKademliaProtocolAppend(append_server.KademliaProtocolAppend):
def _timeout(self, msgID):
self._outstanding[msgID][0].callback((False, None))
del self._outstanding[msgID]
def callPing(self, nodeToAsk, id=None):
address = (nodeToAsk.ip, nodeToAsk.port)
challenge = generate_challenge()
try:
private = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
self.priv_key,
'')
signature = OpenSSL.crypto.sign(private,
challenge,
"sha256")
except:
"Signing ping failed"
if id:
decider = random.random()
if decider < 0.5:
self.ping(address, id, challenge, signature, self.getOwnCert())
else:
self.ping(address, id, challenge, signature)
else:
d = self.ping(address, self.sourceNode.id, challenge, signature, self.getOwnCert())
return True
def turn_evil(self, evilPort):
old_ping = self.rpc_ping
old_find_node = self.rpc_find_node
old_find_value = self.rpc_find_value
self.router.node.port = evilPort;
if self.evilType == "poison":
self.rpc_find_node = self.poison_rpc_find_node
self.rpc_find_value = self.poison_rpc_find_value
self.false_neighbour_list = []
for i in range(0, 30):
fakeid = hashlib.sha1(str(random.getrandbits(255))).digest()
fake_neighbour = [fakeid,
'10.0.0.9',
self.router.node.port]
self.false_neighbour_list.append(fake_neighbour)
_log.debug("Node with port {} prepared to execute "
"poisoning attack".format(self.router.node.port))
elif self.evilType == "insert":
self.rpc_find_node = self.sybil_rpc_find_node
self.rpc_find_value = self.poison_rpc_find_value
ends = bytearray([0x01, 0x02, 0x03])
self.false_neighbour_list = []
for i in range(0, 9):
if i < 3:
key = digest("APA")
elif i > 5:
key = digest("KANIN")
else:
key = digest("KOALA")
key = key[:-1] + bytes(ends[i % 3])
self.false_neighbour_list.append((key,
'10.0.0.9',
self.router.node.port))
_log.debug("Node with port {} prepared to execute node "
"insertion attack".format(self.router.node.port))
elif self.evilType == "eclipse":
self.rpc_find_node = self.eclipse_rpc_find_node
self.rpc_find_value = self.eclipse_rpc_find_value
self.closest_neighbour = map(list,
self.router.findNeighbors((self.router.node)))
self.false_neighbour_list = []
for i in range(0, 10):
fakeid = hashlib.sha1(str(random.getrandbits(255))).digest()
self.false_neighbour_list.append((fakeid,
'10.0.0.9',
self.router.node.port))
_log.debug("Node with port {} prepared to execute eclipse "
"attack on {}".format(self.router.node.port,
self.closest_neighbour[0][2]))
elif self.evilType == "sybil":
self.rpc_find_node = self.sybil_rpc_find_node
self.rpc_find_value = self.poison_rpc_find_value
self.false_neighbour_list = []
for i in range(0, 30):
fakeid = [hashlib.sha1(str(random.getrandbits(255))).digest()]
fake_neighbour = [fakeid, '10.0.0.9', self.router.node.port]
self.false_neighbour_list.append(fake_neighbour)
_log.debug("Node with port {} prepared to execute "
"Sybil attack".format(self.router.node.port))
def poison_routing_tables(self):
fakeid = hashlib.sha1(str(random.getrandbits(255))).digest()
self.neighbours = map(list, self.router.findNeighbors(Node(fakeid),
k=20))
my_randoms = random.sample(xrange(len(self.neighbours)), 1)
for nodeToAttack in my_randoms:
for nodeToImpersonate in range(0, len(self.neighbours)):
if nodeToImpersonate != nodeToAttack:
node = Node(self.neighbours[nodeToAttack][0],
self.neighbours[nodeToAttack][1],
self.neighbours[nodeToAttack][2])
self.callPing(node, self.neighbours[nodeToImpersonate][0])
# def eclipse(self):
# self.neighbours = map(list, self.router.findNeighbors(Node(hashlib.sha1(str(random.getrandbits(255))).digest()),k=20))
# for nodeToAttack in range(0, len(self.neighbours)):
# self.ping((self.neighbours[nodeToAttack][1], self.neighbours[nodeToAttack][2]), self.closest_neighbour[0][0], self.getOwnCert())
# self.ping((self.closest_neighbour[0][1], self.closest_neighbour[0][2]), self.neighbours[nodeToAttack][0], self.getOwnCert())
# def sybil(self, node):
# self.ping((node[0], node[1]), hashlib.sha1(str(random.getrandbits(255))).digest(), self.getOwnCert())
# def sybil_rpc_find_node(self, sender, nodeid, key, challenge, signature):
# self.log.info("finding neighbors of %i in local table" % long(nodeid.encode('hex'), 16))
# source = Node(nodeid, sender[0], sender[1])
# self.maybeTransferKeyValues(source)
# self.router.addContact(source)
# self.false_neighbour_list = random.sample(self.false_neighbour_list, len(self.false_neighbour_list))
# return self.false_neighbour_list
# def eclipse_rpc_find_node(self, sender, nodeid, key, challenge, signature):
# self.log.info("finding neighbors of %i in local table" % long(nodeid.encode('hex'), 16))
# source = Node(nodeid, sender[0], sender[1])
# _log.debug("eclipse rpc_find_node sender=%s, source=%s, key=%s" % (sender, source, base64.b64encode(key)))
# self.maybeTransferKeyValues(source)
# self.router.addContact(source)
# node = Node(key)
# decider = random.random()
# if decider < 0.1:
# self.eclipse()
# self.neighbours = map(list, self.router.findNeighbors(Node(hashlib.sha1(str(random.getrandbits(255))).digest()),k=20))
# neighbourList = list(self.neighbours)
# if long(nodeid.encode('hex'), 16) != long(self.closest_neighbour[0][0].encode('hex'), 16):
# for i in range(0, len(neighbourList)):
# if neighbourList[i][0] is self.closest_neighbour[0][0]:
# neighbourList[i] = (neighbourList[i][0], neighbourList[i][1], self.router.node.port)
# else:
# for i in range(0, len(neighbourList)):
# neighbourList[i] = (neighbourList[i][0], neighbourList[i][1], self.router.node.port)
# mergedlist = []
# mergedlist.extend(neighbourList)
# mergedlist.extend(self.false_neighbour_list)
# self.neighbours = random.sample(mergedlist, len(mergedlist))
# self.neighbours = list(mergedlist)
# return self.neighbours
def poison_rpc_find_node(self, sender, nodeid, key, challenge, signature):
source = Node(nodeid, sender[0], sender[1])
# self.maybeTransferKeyValues(source)
self.router.addContact(source)
node = Node(key)
decider = random.random()
fakeid = hashlib.sha1(str(random.getrandbits(255))).digest()
self.neighbours = map(list, self.router.findNeighbors(Node(fakeid),k=20))
if decider < 0.1:
self.poison_routing_tables()
elif decider > 0.95:
fakeid1 = hashlib.sha1(str(random.getrandbits(255))).digest()
fakeid2 = hashlib.sha1(str(random.getrandbits(255))).digest()
self.find_value((self.neighbours[0][1], self.neighbours[0][2]),
fakeid1,
fakeid2,
challenge,
signature)
elif decider > 0.9:
self.find_node((self.neighbours[0][1], self.neighbours[0][2]),
nodeid,
self.neighbours[0][0],
challenge,
signature)
neighbourList = list(self.neighbours)
for i in range(0, len(neighbourList)):
neighbourList[i] = [neighbourList[i][0],
neighbourList[i][1],
self.router.node.port]
mergedlist = []
mergedlist.extend(neighbourList)
mergedlist.extend(self.false_neighbour_list)
try:
private = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
self.priv_key,
'')
signature = OpenSSL.crypto.sign(private,
challenge,
"sha256")
except:
_log.debug("signing poison find node failed")
return { 'bucket' : mergedlist , 'signature' : signature }
    def poison_rpc_find_value(self, sender, nodeid, key, challenge, signature):
        # Malicious FIND_VALUE handler: by default serve our own stored
        # certificate value; for a few well-known test keys serve a forged
        # value ("apelsin") instead.
        value = self.storage[digest(str(self.sourceNode.id.encode("hex").upper()) + "cert")]
        if key == digest("APA") or \
           key == digest("KANIN") or \
           key == digest("KOALA"):
            logger(self.sourceNode,
                   "Attacking node with port {} sent back "
                   "forged value".format(self.router.node.port))
            value = "apelsin"
        try:
            private = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                                     self.priv_key,
                                                     '')
            signature = OpenSSL.crypto.sign(private,
                                            challenge,
                                            "sha256")
        except:
            # NOTE(review): bare except — the caller-supplied `signature` is
            # reused on failure; presumably intentional for this attack code.
            _log.debug("signing poison find value failed")
        return { 'value': value, 'signature': signature }
# def eclipse_rpc_find_value(self, sender, nodeid, key, challenge, signature):
# source = Node(nodeid, sender[0], sender[1])
# if long(nodeid.encode('hex'), 16) != long(self.closest_neighbour[0][0].encode('hex'), 16):
# _log.debug("Attacking node with port {} received value request from non-eclipsed node".format(self.router.node.port))
# self.maybeTransferKeyValues(source)
# self.router.addContact(source)
# exists, value = self.storage.get(key, None)
# if not exists:
# return self.rpc_find_node(sender, nodeid, key, challenge, signature)
# else:
# return { 'value': value }
# else:
# _log.debug("Attacking node with port {} received value request from eclipsed node".format(self.router.node.port))
# return { 'value' : 'apelsin' }
class evilAppendServer(append_server.AppendServer):
    """AppendServer variant whose protocol is the attacking
    evilKademliaProtocolAppend (used for DHT security testing)."""

    def __init__(self, ksize=20, alpha=3, id=None, storage=None):
        storage = storage or append_server.ForgetfulStorageFix()
        # Bypass AppendServer.__init__ and call the base Server directly so we
        # can substitute the malicious protocol below.
        append_server.Server.__init__(self,
                                      ksize,
                                      alpha,
                                      id,
                                      storage=storage)
        self.protocol = evilKademliaProtocolAppend(self.node,
                                                   self.storage,
                                                   ksize)

    def bootstrap(self, addrs):
        """
        Bootstrap the server by connecting to other known nodes in the network.
        Args:
            addrs: A `list` of (ip, port) `tuple` pairs. Note that only IP addresses
            are acceptable - hostnames will cause an error.
        """
        # if the transport hasn't been initialized yet, wait a second
        # _log.debug("bootstrap"
        if self.protocol.transport is None:
            return append_server.task.deferLater(service_discovery_ssdp.reactor,
                                                 1,
                                                 self.bootstrap,
                                                 addrs)

        def initTable(results, challenge, id):
            # Callback for the ping deferreds: verify each responder's
            # signature against its stored certificate (fetching the cert
            # first if unknown) and crawl the network from the good nodes.
            nodes = []
            for addr, result in results.items():
                ip = addr[0]
                port = addr[1]
                if result[0]:
                    resultId = result[1]['id']
                    resultIdHex = resultId.encode('hex').upper()
                    resultSign = result[1]['signature']
                    data = self.protocol.certificateExists(resultIdHex)
                    if not data:
                        identifier = "{}cert".format(resultIdHex)
                        self.protocol.callCertFindValue(Node(resultId,
                                                             ip,
                                                             port),
                                                        Node(identifier))
                    else:
                        cert_stored = self.protocol.searchForCertificate(resultIdHex)
                        try:
                            OpenSSL.crypto.verify(cert_stored,
                                                  resultSign,
                                                  challenge,
                                                  "sha256")
                        except:
                            traceback.print_exc()
                        # NOTE(review): node is appended even when verify()
                        # fails (only the traceback is printed) — confirm
                        # whether that is intended.
                        nodes.append(Node(resultId, ip, port))
            spider = NodeSpiderCrawl(self.protocol,
                                     self.node,
                                     nodes,
                                     self.ksize,
                                     self.alpha)
            return spider.find()

        ds = {}
        challenge = generate_challenge()
        id = None
        if addrs:
            # Only the first bootstrap address is contacted here.
            data = addrs[0]
            addr = (data[0], data[1])
            try:
                cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                                       data[2])
                fingerprint = cert.digest("sha256")
                # Node id = last 40 hex chars of the certificate fingerprint.
                id = fingerprint.replace(":", "")[-40:]
                private = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                                         self.protocol.priv_key,
                                                         '')
                signature = OpenSSL.crypto.sign(private,
                                                "{}{}".format(id, challenge),
                                                "sha256")
                ds[addr] = self.protocol.ping(addr,
                                              self.node.id,
                                              challenge,
                                              signature,
                                              self.protocol.getOwnCert())
            except:
                logger(self.protocol.sourceNode, "Certificate creation failed")
            self.protocol.storeCert(data[2], id)
            # NOTE(review): if the try block failed, id is None here and
            # id.decode("hex") raises — confirm the error path.
            node = Node(id.decode("hex"), data[0], data[1])
            if self.protocol.router.isNewNode(node):
                return deferredDict(ds).addCallback(initTable, challenge, id)
        return deferredDict(ds)
def drawNetworkState(name, servers, amount_of_servers):
    """Save image describing network of `servers` as `name` (a PNG path).

    For each server, looks up the neighbours of a random node id and draws
    a directed edge from the server's port to each neighbour's port,
    labelled with the last 4 hex chars of the neighbour's id.
    """
    # pydot is optional; silently skip drawing when it is unavailable.
    if pydot is None:
        return
    graph = pydot.Dot(graph_type='digraph',
                      nodesep=0,
                      ranksep=0,
                      rankdir="BT")
    for servno in range(0, amount_of_servers):
        # Random target id so findNeighbors returns up to k=50 contacts.
        rndnode = Node(hashlib.sha1(str(random.getrandbits(255))).digest())
        findNeighbors = servers[servno].dht_server.kserver.protocol.router.findNeighbors
        neighbors = map(tuple, findNeighbors(rndnode, k=50))
        for neighbor in neighbors:
            printPort = servers[servno].dht_server.port.getHost().port
            edge = pydot.Edge(str(printPort),
                              str(neighbor[2]),
                              label=str(neighbor[0].encode('hex')[-4:]))
            graph.add_edge(edge)
    graph.write_png(name)
def logger(node, message, level=None):
    """Debug-log *message* tagged with the node's hex id, ip and port.

    ``level`` is accepted for API compatibility but not used.
    """
    tag = node.id.encode("hex").upper()
    _log.debug("%s:%s:%s - %s" % (tag, node.ip, node.port, message))
    # print("{}:{}:{} - {}".format(node.id.encode("hex").upper(), node.ip, node.port, message))
| les69/calvin-base | calvin/runtime/south/plugins/storage/twistedimpl/securedht/dht_server_commons.py | Python | apache-2.0 | 22,795 |
from miniworld import log
from miniworld.Scenario import scenario_config
from miniworld.model.network.linkqualitymodels import LinkQualityModel, LinkQualityConstants
__author__ = 'Nils Schmidt'
class LinkQualityModelRange(LinkQualityModel.LinkQualityModel):
    """Binary range model: a lossless link inside 30 distance units,
    no link at all beyond that range."""

    #####################################################
    # Implement these methods in a subclass
    #####################################################

    def _distance_2_link_quality(self, distance):
        """Return ``(connected, settings)`` for *distance*.

        Connected (loss = 0) iff ``0 <= distance < 30``; otherwise
        ``(False, None)``.
        """
        if not (0 <= distance < 30):
            return False, None
        return True, {LinkQualityConstants.LINK_QUALITY_KEY_LOSS: 0}
# TODO: extract
class LinkQualityModelNetEm(LinkQualityModel.LinkQualityModel):
    """Base class for link-quality models expressed as `tc netem` options."""
    # TODO:
    # Option names understood by netem; subclasses fill in the values.
    NETEM_KEY_LOSS = "loss"
    NETEM_KEY_LIMIT = "limit"
    NETEM_KEY_DELAY = "delay"
    NETEM_KEY_CORRUPT = "corrupt"
    NETEM_KEY_DUPLICATE = "duplicate"
    NETEM_KEY_REORDER = "reorder"
    NETEM_KEY_RATE = "rate"
    # order of options that netem needs
    NETEM_KEYS = (
        NETEM_KEY_LIMIT, NETEM_KEY_DELAY, NETEM_KEY_LOSS, NETEM_KEY_CORRUPT, NETEM_KEY_DUPLICATE, NETEM_KEY_REORDER,
        NETEM_KEY_RATE)
class LinkQualityModelWiFiLinear(LinkQualityModelNetEm):
    """WiFi model where bandwidth falls off roughly linearly with distance."""

    # Maximum link bandwidth (presumably KBit/s, matching 54 MBit WiFi —
    # TODO confirm unit against the consumer of LINK_QUALITY_KEY_BANDWIDTH).
    MAX_BANDWIDTH = 54000
    # NOTE: runs once at import time, when the class body is evaluated.
    log.info("max_bandwidth: %s" % MAX_BANDWIDTH)

    #####################################################
    # Implement these methods in a subclass
    #####################################################

    def _distance_2_link_quality(self, distance):
        # Force float arithmetic (file predates true division everywhere).
        distance = distance * 1.0
        default_link_quality = \
            {self.NETEM_KEY_LOSS: None,
             self.NETEM_KEY_LIMIT: None,
             self.NETEM_KEY_DELAY: None,
             self.NETEM_KEY_CORRUPT: None,
             self.NETEM_KEY_DUPLICATE: None,
             self.NETEM_KEY_REORDER: None,
             self.NETEM_KEY_RATE: None
             }
        # distribute bandwidth linear for dist in [0, 30)
        # TODO: other way than defining maximum bandwidth?
        max_bandwidth = scenario_config.get_link_bandwidth() or self.MAX_BANDWIDTH
        distance += 1
        if distance >= 0:
            distance = distance / 2
        if distance >= 0:
            # Bandwidth scales as max/distance once distance > 1.
            bandwidth = 1.0 * max_bandwidth / distance if distance > 1 else max_bandwidth
            default_link_quality[LinkQualityConstants.LINK_QUALITY_KEY_BANDWIDTH] = bandwidth
            delay_const = (distance - 1) * 2 if distance > 1 else 0
            delay_const_str = '%.2f' % delay_const
            # Jitter is 10% of the base delay.
            delay_variation = delay_const / 10.0
            delay_variation_str = '%.2f' % delay_variation
            delay_cmd = "{delay_const}ms {delay_var}ms 25%".format(delay_const=delay_const_str,
                                                                   delay_var=delay_variation_str)
            # delay_cmd = "{delay_const} {delay_var} distribution normal".format(delay_const=delay_const, delay_var=delay_variation)
            default_link_quality[self.NETEM_KEY_DELAY] = delay_cmd
        # return bandwidth, delay_const, delay_variation
        # NOTE(review): if distance < -1 on entry, `bandwidth` is never bound
        # and the line below raises — presumably distances are always >= 0.
        if bandwidth >= 1000:
            return True, default_link_quality
        return False, default_link_quality
class LinkQualityModelWiFiExponential(LinkQualityModelWiFiLinear):
    """WiFi model where bandwidth halves every 4 distance units."""

    #####################################################
    # Implement these methods in a subclass
    #####################################################

    # TODO: Abstract!
    def _distance_2_link_quality(self, distance):
        """Return ``(connected, netem_settings)`` for *distance*.

        Connected iff the computed bandwidth stays >= 1000.
        """
        distance = distance * 1.0
        default_link_quality = \
            {self.NETEM_KEY_LOSS: None,
             self.NETEM_KEY_LIMIT: None,
             self.NETEM_KEY_DELAY: None,
             self.NETEM_KEY_CORRUPT: None,
             self.NETEM_KEY_DUPLICATE: None,
             self.NETEM_KEY_REORDER: None,
             self.NETEM_KEY_RATE: None
             }
        # distribute bandwidth linear for dist in [0, 30)
        # TODO: other way than defining maximum bandwidth?
        max_bandwidth = scenario_config.get_link_bandwidth() or self.MAX_BANDWIDTH
        if distance >= 0:
            # Halve bandwidth for every 4 units of distance.
            bandwidth_divisor = 2 ** int(distance / 4.0)
            bandwidth = 1.0 * max_bandwidth / bandwidth_divisor if distance >= 1 else max_bandwidth
            default_link_quality[LinkQualityConstants.LINK_QUALITY_KEY_BANDWIDTH] = bandwidth
            delay_const = bandwidth_divisor
            delay_const_str = '%.2f' % delay_const
            # Jitter is 10% of the base delay.
            delay_variation = delay_const / 10.0
            delay_variation_str = '%.2f' % delay_variation
            delay_cmd = "{delay_const}ms {delay_var}ms 25%".format(delay_const=delay_const_str,
                                                                   delay_var=delay_variation_str)
            # delay_cmd = "{delay_const} {delay_var} distribution normal".format(delay_const=delay_const, delay_var=delay_variation)
            default_link_quality[self.NETEM_KEY_DELAY] = delay_cmd
        # return bandwidth, delay_const, delay_variation
        # NOTE(review): if distance < 0 on entry, `bandwidth` is never bound
        # and the line below raises — presumably distances are always >= 0.
        if bandwidth >= 1000:
            return True, default_link_quality
        return False, default_link_quality
if __name__ == '__main__':
    # Ad-hoc comparison of the linear and exponential models: prints one
    # LaTeX table row per distance in [0, 30).
    # print LinkQualityModelWiFi().distance_2_link_quality(0)
    # print LinkQualityModelWiFi().distance_2_link_quality(0.5)
    # print LinkQualityModelWiFi().distance_2_link_quality(1)
    # print LinkQualityModelWiFi().distance_2_link_quality(1.1)
    # print LinkQualityModelWiFi().distance_2_link_quality(2.1)
    # print LinkQualityModelWiFi().distance_2_link_quality(3)
    # print LinkQualityModelWiFi().distance_2_link_quality(100)
    values = []
    for x in range(0, 30):
        vals1 = LinkQualityModelWiFiLinear()._distance_2_link_quality(x)
        vals2 = LinkQualityModelWiFiExponential()._distance_2_link_quality(x)
        values.append([x] + list(vals1) + list(vals2))
    # Shade every second row (\trowgray) and format as a LaTeX tabular body.
    print('\n'.join(
        [("\\trowgray\n" if val[0] % 2 == 0 else "") + "\\hline\n%s & %.00f & %s & %s & %.00f & %s & %s \\\\" % tuple(
            val) for val in values]))
| miniworld-project/miniworld_core | miniworld/model/network/linkqualitymodels/LinkQualityModelRange.py | Python | mit | 6,220 |
# Copyright (c) 2010, Robert Escriva
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Firmant nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import firmant.objects
__all__ = ['add_parser', 'add_writer', 'main']
_modules = []
_parsers = []
_writers = []
def add_module(module):
    """Register *module* (a callable) to be invoked during :func:`main`."""
    _modules.append(module)
def add_parser(parser):
    """Register *parser*; main() drives its parse_all()/iterate() hooks."""
    _parsers.append(parser)
def add_writer(writer):
    """Register *writer*; main() drives its urls()/write_all() hooks."""
    _writers.append(writer)
def main():
    """Run the Firmant pipeline.

    Initializes registered modules, runs all parsers (including their
    iterative object generation), cross-checks permalinks against writer
    URLs, lets objects update their URLs, and finally runs all writers.
    """
    # Initialize all modules.
    for module in _modules:
        # FIX: was `mod()` — an undefined name that raised NameError as soon
        # as any module was registered; the loop variable is `module`.
        module()
    # For each parser, parse all objects the parser can handle.
    for parser in _parsers:
        if hasattr(parser, 'parse_all'):
            parser.parse_all()
    # Generate additional objects: keep iterating until every parser either
    # reports completion (iterate() returns truthy) or has no iterate() hook.
    iterated = set()
    while _parsers and len(iterated) != len(_parsers):
        for parser in _parsers:
            if parser not in iterated and hasattr(parser, 'iterate'):
                if parser.iterate():
                    iterated.add(parser)
            elif parser not in iterated:
                iterated.add(parser)
    # Check for URL conflicts.
    permalinks = set()
    for key, obj in firmant.objects.retrieve():
        # NOTE(review): firmant.urls is not imported above — presumably the
        # submodule is imported elsewhere before main() runs; confirm.
        url = firmant.urls.url(key)
        if url and url in permalinks:
            print('The permalink', url, 'is overloaded.')
        if url:
            permalinks.add(url)
    urls = set()
    for writer in _writers:
        if hasattr(writer, 'urls'):
            wurls = writer.urls()
            for url in wurls:
                if url in urls:
                    print('The URL', url, 'is overloaded.')
                urls.add(url)
    for url in permalinks - urls:
        print('The permalink', url, 'is not created by any writer.')
    # Update objects to use the now exposed URLs
    for key, obj in firmant.objects.retrieve():
        if hasattr(obj, 'update_urls'):
            obj.update_urls()
    # Generate the site.
    for writer in _writers:
        if hasattr(writer, 'write_all'):
            writer.write_all()
| rescrv/firmant | firmant/__init__.py | Python | bsd-3-clause | 3,563 |
#!/usr/bin/env python
''' Christmas light controller. '''
from mince import Lights, Colour, get_light_options
from mince.effects import FXRunner
from mince.effects.value import RandomTwinkleFX
import atexit
import time
import requests
URL = "http://api.thingspeak.com/channels/1417/field/1/last.txt"
def colour_all(effects, colour):
    """Assign the same *colour* to every light managed by *effects*."""
    effects.colours = [colour for _ in range(effects.num_lights)]
def main():
    ''' Set the colour of the lights using the CheerLights feed. '''
    # NOTE: Python 2 code (print statements below).
    lights = get_light_options()
    fx = FXRunner(lights, [Colour(0, 0, 0)] * lights.num_lights)
    fx.run(RandomTwinkleFX, speed=1.0)
    last_colour = None
    bright = None  # NOTE(review): assigned but never used
    check_time = 0
    # Stop the effect runner cleanly on interpreter exit.
    atexit.register(lambda: fx.stop())
    while True:
        # Poll the CheerLights feed at most once every 30 seconds.
        if time.time() - check_time > 30:
            # Get next colour
            request = requests.get(URL)
            # Did we get the page?
            tmp_colour = None
            if request.status_code == requests.codes.ok:
                # Special cheerlights colour
                if request.text.lower() == "warmwhite":
                    tmp_colour = Colour.named("oldlace")
                elif request.text.lower() == "off":
                    tmp_colour = Colour.named("black")
                else:
                    tmp_colour = Colour.named(request.text)
                if tmp_colour is not None:
                    if tmp_colour != last_colour:
                        print "Setting lights to %s" % tmp_colour
                        # Set the lights to the colour
                        colour_all(fx, tmp_colour)
                        # Keep track of last colour set
                        last_colour = tmp_colour
                else:
                    print "Unrecognised colour"
                    # Turn off all the lights
                    colour_all(fx, Colour(0, 0, 0))
            else:
                print "Problem getting page: %d" % request.status_code
            check_time = time.time()
if __name__ == '__main__':
    # Run the controller loop only when executed as a script.
    main()
| snorecore/MincePi | scripts/cheer.py | Python | mit | 2,060 |
# -*- coding: utf-8 -*-
""" Sahana Eden Fire Station Model
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3FireStationModel"]
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3FireStationModel(S3Model):
    """
    A Model to manage Fire Stations:
    http://eden.sahanafoundation.org/wiki/Deployments/Bombeiros
    """

    # Table/variable names defined by this model (registered with S3Model).
    names = ["fire_station",
             "fire_station_vehicle",
             "fire_water_source",
             "fire_hazard_point",
             "fire_staff_on_duty"
             ]
    def model(self):
        """Define all fire_* tables and return the names exposed in s3db."""

        T = current.T
        db = current.db
        request = current.request

        # Foreign-key field constructors provided by other models
        person_id = self.pr_person_id
        location_id = self.gis_location_id
        organisation_id = self.org_organisation_id
        human_resource_id = self.hrm_human_resource_id
        ireport_id = self.irs_ireport_id
        vehicle_id = self.vehicle_vehicle_id

        add_component = self.add_component
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table

        # =====================================================================
        # Fire Station
        #
        fire_station_types = {
            1: T("Fire Station"),
            9: T("Unknown type of facility"),
        }

        tablename = "fire_station"
        table = define_table(tablename,
                             self.super_link("site_id", "org_site"),
                             Field("name", notnull=True, length=64,
                                   label = T("Name")),
                             Field("code", unique=True, length=64,
                                   label = T("Code")),
                             Field("facility_type", "integer",
                                   label = T("Facility Type"),
                                   requires = IS_NULL_OR(IS_IN_SET(fire_station_types)),
                                   default = 1,
                                   represent = lambda opt: \
                                       fire_station_types.get(opt, T("not specified"))
                                   ),
                             organisation_id(),
                             location_id(),
                             Field("phone", label = T("Phone"),
                                   requires = IS_NULL_OR(s3_phone_requires)),
                             Field("website", label=T("Website"),
                                   requires = IS_NULL_OR(IS_URL()),
                                   represent = lambda url: s3_url_represent(url)),
                             Field("email", label = T("Email"),
                                   requires = IS_NULL_OR(IS_EMAIL())
                                   ),
                             Field("fax", label = T("Fax"),
                                   requires = IS_NULL_OR(s3_phone_requires)),
                             Field("obsolete", "boolean",
                                   label = T("Obsolete"),
                                   represent = lambda bool: \
                                       (bool and [T("Obsolete")] or [current.messages.NONE])[0],
                                   default = False,
                                   readable = False,
                                   writable = False),
                             s3_comments(),
                             *s3_meta_fields())

        # Fire stations are org_site instances (super-entity link)
        self.configure("fire_station",
                       super_entity="org_site")

        # Reusable FK field for referencing a fire station
        station_id = S3ReusableField("station_id", table,
                                     requires = IS_NULL_OR(
                                                    IS_ONE_OF(db, "fire_station.id",
                                                              self.fire_station_represent)),
                                     represent = self.fire_station_represent,
                                     label = T("Station"),
                                     ondelete = "CASCADE"
                                     )

        # CRUD strings
        ADD_FIRE_STATION = T("Add Fire Station")
        crud_strings[tablename] = Storage(
            title_create = ADD_FIRE_STATION,
            title_display = T("Fire Station Details"),
            title_list = T("Fire Stations"),
            title_update = T("Edit Station Details"),
            title_search = T("Search for Fire Station"),
            title_upload = T("Upload Fire Stations List"),
            title_map = T("Map of Fire Stations"),
            subtitle_create = T("Add New Fire Station"),
            label_list_button = T("List Fire Stations"),
            label_create_button = ADD_FIRE_STATION,
            label_delete_button = T("Delete Fire Station"),
            msg_record_created = T("Fire Station added"),
            msg_record_modified = T("Fire Station updated"),
            msg_record_deleted = T("Fire Station deleted"),
            msg_no_match = T("No Fire Stations could be found"),
            msg_list_empty = T("No Fire Stations currently registered"))

        # Components: vehicles (via link table) and shift tables
        add_component("vehicle_vehicle",
                      fire_station = Storage(link="fire_station_vehicle",
                                             joinby="station_id",
                                             key="vehicle_id",
                                             actuate="replace"))

        add_component("fire_shift",
                      fire_station = "station_id")

        add_component("fire_shift_staff",
                      fire_station = "station_id")

        # =====================================================================
        # Vehicles of Fire stations
        #
        tablename = "fire_station_vehicle"
        table = define_table(tablename,
                             station_id(),
                             vehicle_id(),
                             *s3_meta_fields()
                             )

        # CRUD strings
        ADD_VEHICLE = T("Add Vehicle")
        crud_strings[tablename] = Storage(
            title_create = ADD_VEHICLE,
            title_display = T("Vehicle Details"),
            title_list = T("Vehicles"),
            title_update = T("Edit Vehicle Details"),
            title_search = T("Search for Vehicles"),
            title_upload = T("Upload Vehicles List"),
            subtitle_create = T("Add New Vehicle"),
            label_list_button = T("List Vehicles"),
            label_create_button = ADD_VEHICLE,
            label_delete_button = T("Delete Vehicle"),
            msg_record_created = T("Vehicle added"),
            msg_record_modified = T("Vehicle updated"),
            msg_record_deleted = T("Vehicle deleted"),
            msg_no_match = T("No Vehicles could be found"),
            msg_list_empty = T("No Vehicles currently registered"))

        # Custom REST method: deployment-time report per station
        self.set_method("fire", "station",
                        method="vehicle_report",
                        action=self.vehicle_report)

        # =====================================================================
        # Water Sources
        #
        tablename = "fire_water_source"
        table = define_table(tablename,
                             Field("name", "string"),
                             location_id(),
                             #Field("good_for_human_usage", "boolean"),
                             #Field("fresh", "boolean"),
                             #Field("Salt", "boolean"),
                             #Field("toponymy", "string"),
                             #Field("parish", "string"),
                             #Field("type", "string"),
                             #Field("owner", "string"),
                             #person_id(),
                             #organisation_id(),
                             #Field("shape", "string"),
                             #Field("diameter", "string"),
                             #Field("depth", "string"),
                             #Field("volume", "integer"),
                             #Field("lenght", "integer"),
                             #Field("height", "integer"),
                             #Field("usefull_volume", "integer"),
                             #Field("catchment", "integer"),
                             #Field("area", "integer"),
                             #Field("date", "date"),
                             #Field("access_type", "string"),
                             #Field("previews_usage", "boolean"),
                             #Field("car_access", "string"),
                             #Field("mid_truck_access", "string"),
                             #Field("truck_access", "string"),
                             #Field("distance_from_trees", "integer"),
                             #Field("distance_from_buildings", "integer"),
                             #Field("helicopter_access", "string"),
                             #Field("previews_usage_air", "boolean"),
                             #Field("car_movment_conditions", "string"),
                             #Field("midtruck_movment_conditions", "string"),
                             #Field("truck_movment_conditions", "string"),
                             #Field("powerline_distance", "integer"),
                             #Field("distance_other_risks", "integer"),
                             #Field("anti_seismic_construction", "boolean"),
                             #Field("isolated_from_air", "boolean"),
                             #Field("hermetic", "boolean"),
                             s3_comments(),
                             *s3_meta_fields())

        # =====================================================================
        # Hazards
        # - this is long-term hazards, not incidents
        #
        tablename = "fire_hazard_point"
        table = define_table(tablename,
                             location_id(),
                             Field("name", "string"),
                             # What are the Org & Person for? Contacts?
                             organisation_id(),
                             person_id(),
                             s3_comments(),
                             *s3_meta_fields())

        # =====================================================================
        # Shifts
        #
        tablename = "fire_shift"
        table = define_table(tablename,
                             station_id(),
                             Field("name"),
                             s3_datetime("start_time",
                                         empty=False,
                                         default="now"
                                         ),
                             s3_datetime("end_time",
                                         empty=False,
                                         default="now"
                                         ),
                             *s3_meta_fields())

        # Reusable FK field for referencing a shift
        shift_id = S3ReusableField("shift_id", table,
                                   requires = IS_NULL_OR(
                                                  IS_ONE_OF(db, "fire_shift.id",
                                                            self.fire_shift_represent)),
                                   represent = self.fire_shift_represent,
                                   label = T("Shift"),
                                   ondelete = "CASCADE")

        # ---------------------------------------------------------------------
        tablename = "fire_shift_staff"
        table = define_table(tablename,
                             station_id(),
                             #shift_id(),
                             human_resource_id(),
                             *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3db.*)
        #
        return Storage(
            # used by IRS
            fire_staff_on_duty = self.fire_staff_on_duty
        )
# -------------------------------------------------------------------------
@staticmethod
def fire_station_represent(id, row=None):
""" FK representation """
if row:
return row.name
elif not id:
return current.messages.NONE
db = current.db
table = db.fire_station
r = db(table.id == id).select(table.name,
limitby = (0, 1)).first()
try:
return r.name
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def fire_shift_represent(shift):
"""
"""
db = current.db
table = db.fire_shift
if not isinstance(shift, Row):
shift = db(table.id == shift).select(table.start_time,
table.end_time,
limitby=(0, 1)).first()
return "%s - %s" % (shift.start_time, shift.end_time)
# -------------------------------------------------------------------------
@staticmethod
def fire_staff_on_duty(station_id=None):
"""
Return a query for hrm_human_resource filtering
for entries which are linked to a current shift
"""
db = current.db
staff = db.hrm_human_resource
roster = db.fire_shift_staff
query = (staff.id == roster.human_resource_id) & \
(roster.deleted != True)
if station_id is not None:
query &= (roster.station_id == station_id)
return query
# -------------------------------------------------------------------------
    @staticmethod
    def vehicle_report(r, **attr):
        """
        Custom method to provide a report on Vehicle Deployment Times
        - this is one of the main tools currently used to manage an Incident
        """
        rheader = attr.get("rheader", None)
        if rheader:
            rheader = rheader(r)
        station_id = r.id
        if station_id:
            s3db = current.s3db
            dtable = s3db.irs_ireport_vehicle
            vtable = s3db.vehicle_vehicle
            stable = s3db.fire_station_vehicle
            # Join this station's vehicles to their deployment records
            query = (stable.station_id == station_id) & \
                    (stable.vehicle_id == vtable.id) & \
                    (vtable.asset_id == dtable.asset_id)
            current.response.s3.crud_strings["irs_ireport_vehicle"] = Storage(
                title_report = "Vehicle Deployment Times"
            )
            # Build a pivot-table request: minutes summed per asset x report
            req = current.manager.parse_request("irs", "ireport_vehicle",
                                                args=["report"],
                                                vars=Storage(
                                                    rows = "asset_id",
                                                    cols = "ireport_id",
                                                    fact = "minutes",
                                                    aggregate = "sum"
                                                ))
            req.set_handler("report", S3Cube())
            req.resource.add_filter(query)
            return req(rheader=rheader)
# END =========================================================================
| ashwyn/eden-message_parser | modules/eden/fire.py | Python | mit | 16,662 |
#!/usr/bin/env python
# Safe Eyes is a utility to remind you to take break frequently
# to protect your eyes from eye strain.
# Copyright (C) 2017 Gobinath
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Show health statistics on the break screen.
"""
import logging
# Plugin state: set by init() from the Safe Eyes context/session and
# updated by the break callbacks below.
context = None
no_of_skipped_breaks = 0
no_of_breaks = 0
no_of_cycles = -1
session = None
def init(ctx, safeeyes_config, plugin_config):
    """
    Initialize the plugin.

    Restores the break/skip/cycle counters from the Safe Eyes session so the
    statistics survive restarts.
    """
    global context
    global session
    global no_of_skipped_breaks
    global no_of_breaks
    global no_of_cycles
    logging.debug('Initialize Health Stats plugin')
    context = ctx
    if session is None:
        # First init: attach our state dict to the shared session store.
        session = context['session']['plugin'].get('healthstats', None)
        if session is None:
            session = {'no_of_skipped_breaks': 0, 'no_of_breaks': 0, 'no_of_cycles': -1}
            context['session']['plugin']['healthstats'] = session
        no_of_skipped_breaks = session.get('no_of_skipped_breaks', 0)
        no_of_breaks = session.get('no_of_breaks', 0)
        no_of_cycles = session.get('no_of_cycles', -1)
def on_stop_break():
    """
    Count the break as skipped and persist the counter to the session.

    (The previous docstring — "play the alert sound" — was a copy-paste
    from another plugin; this callback only updates statistics.)
    """
    global no_of_skipped_breaks
    if context['skipped']:
        no_of_skipped_breaks += 1
        session['no_of_skipped_breaks'] = no_of_skipped_breaks
def get_widget_title(break_obj):
    """
    Return the widget title.

    Also advances the break/cycle counters and persists them to the
    session (called once per break, before the widget content).
    """
    global no_of_breaks
    global no_of_cycles
    no_of_breaks += 1
    if context['new_cycle']:
        no_of_cycles += 1
    session['no_of_breaks'] = no_of_breaks
    session['no_of_cycles'] = no_of_cycles
    # `_` is presumably the gettext translation function installed by
    # Safe Eyes at runtime — it is not imported in this module; confirm.
    return _('Health Statistics')
def get_widget_content(break_obj):
    """Return the statistics line shown on the break screen."""
    return ('BREAKS: {b}\tSKIPPED: {s}\tCYCLES: {c}'
            .format(b=no_of_breaks, s=no_of_skipped_breaks, c=no_of_cycles))
| bayuah/SafeEyes | safeeyes/plugins/healthstats/plugin.py | Python | gpl-3.0 | 2,436 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.