hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2ac16f5ce747685d532348fcebefd49a3a7cc2da | 4,961 | py | Python | test/layouts/test_matrix.py | body20002/qtile | e0f1968a96e9acf2b6c9562fae44802ffc9951ec | [
"MIT"
] | 1 | 2021-10-01T18:33:18.000Z | 2021-10-01T18:33:18.000Z | test/layouts/test_matrix.py | body20002/qtile | e0f1968a96e9acf2b6c9562fae44802ffc9951ec | [
"MIT"
] | null | null | null | test/layouts/test_matrix.py | body20002/qtile | e0f1968a96e9acf2b6c9562fae44802ffc9951ec | [
"MIT"
] | 1 | 2022-03-01T21:23:12.000Z | 2022-03-01T21:23:12.000Z | # Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2013 Mattias Svala
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Chris Wesseling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import libqtile.config
from libqtile import layout
from libqtile.confreader import Config
from test.layouts.layout_utils import assert_focus_path, assert_focused
class MatrixConfig(Config):
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d"),
]
layouts = [layout.Matrix(columns=2)]
floating_layout = libqtile.resources.default_config.floating_layout
keys = []
mouse = []
screens = []
matrix_config = pytest.mark.parametrize("manager", [MatrixConfig], indirect=True)
@matrix_config
def test_matrix_simple(manager):
manager.test_window("one")
assert manager.c.layout.info()["rows"] == [["one"]]
manager.test_window("two")
assert manager.c.layout.info()["rows"] == [["one", "two"]]
manager.test_window("three")
assert manager.c.layout.info()["rows"] == [["one", "two"], ["three"]]
@matrix_config
def test_matrix_navigation(manager):
manager.test_window("one")
manager.test_window("two")
manager.test_window("three")
manager.test_window("four")
manager.test_window("five")
manager.c.layout.right()
assert manager.c.layout.info()["current_window"] == (0, 2)
manager.c.layout.up()
assert manager.c.layout.info()["current_window"] == (0, 1)
manager.c.layout.up()
assert manager.c.layout.info()["current_window"] == (0, 0)
manager.c.layout.up()
assert manager.c.layout.info()["current_window"] == (0, 2)
manager.c.layout.down()
assert manager.c.layout.info()["current_window"] == (0, 0)
manager.c.layout.down()
assert manager.c.layout.info()["current_window"] == (0, 1)
manager.c.layout.right()
assert manager.c.layout.info()["current_window"] == (1, 1)
manager.c.layout.right()
assert manager.c.layout.info()["current_window"] == (0, 1)
manager.c.layout.left()
assert manager.c.layout.info()["current_window"] == (1, 1)
@matrix_config
def test_matrix_add_remove_columns(manager):
manager.test_window("one")
manager.test_window("two")
manager.test_window("three")
manager.test_window("four")
manager.test_window("five")
manager.c.layout.add()
assert manager.c.layout.info()["rows"] == [["one", "two", "three"], ["four", "five"]]
manager.c.layout.delete()
assert manager.c.layout.info()["rows"] == [["one", "two"], ["three", "four"], ["five"]]
@matrix_config
def test_matrix_window_focus_cycle(manager):
# setup 3 tiled and two floating clients
manager.test_window("one")
manager.test_window("two")
manager.test_window("float1")
manager.c.window.toggle_floating()
manager.test_window("float2")
manager.c.window.toggle_floating()
manager.test_window("three")
# test preconditions
assert manager.c.layout.info()["clients"] == ["one", "two", "three"]
# last added window has focus
assert_focused(manager, "three")
# assert window focus cycle, according to order in layout
assert_focus_path(manager, "float1", "float2", "one", "two", "three")
@matrix_config
def test_matrix_next_no_clients(manager):
manager.c.layout.next()
@matrix_config
def test_matrix_previous_no_clients(manager):
manager.c.layout.previous()
def test_unknown_client():
"""Simple test to get coverage to 100%!"""
matrix = layout.Matrix()
# The layout will not configure an unknown client.
# Without the return statement in "configure" the following
# code would result in an error
assert matrix.configure("fakeclient", None) is None
| 34.93662 | 91 | 0.701673 |
4ae1f19ef2e083a28cbf223732cad07cddf36ee6 | 8,310 | py | Python | touha/cli.py | dem4ply/touha | 0a4815487dbdde12c8ff7ec22f76a8a8177ad964 | [
"WTFPL"
] | null | null | null | touha/cli.py | dem4ply/touha | 0a4815487dbdde12c8ff7ec22f76a8a8177ad964 | [
"WTFPL"
] | null | null | null | touha/cli.py | dem4ply/touha | 0a4815487dbdde12c8ff7ec22f76a8a8177ad964 | [
"WTFPL"
] | null | null | null | import os
import argparse
import sys
import datetime
from touha import Touhas
import logging
import random
from argparse import ArgumentParser
from chibi.file import Chibi_path
from chibi.file.temp import Chibi_temp_path
from chibi_requests import Chibi_url
from chibi_command.disk.dd import DD
from chibi_command.disk.mount import Mount, Umount
from chibi_command.disk.format import Ext4, Vfat
from chibi_command.file import Bsdtar
from touha.mount import _mount, _umount
from touha.spell_card import Spell_card
logger_formatter = '%(levelname)s %(name)s %(asctime)s %(message)s'
logger = logging.getLogger( 'touhas.cli' )
def get_touhas( args ):
backups = args.backup_path + 'touhas'
    if not backups.exists:
backups.mkdir()
touhas = Touhas( backups )
return touhas
def find_hostname_on_block( block, mnt ):
parts = block.dir_name.find( f'{block.base_name}.+' )
try:
for block in parts:
hostname = find_hostname_on_part( block, mnt )
if hostname:
return hostname
except PermissionError as e:
logger.warning( str( e ) )
def find_hostname_on_part( block, mnt ):
logger.info( f"revisando si {block} es root" )
if not mnt.exists:
mnt.mkdir()
if os.path.ismount( str( mnt ) ):
umount = Umount()( mnt )
logger.info( str( umount ) )
result = Mount()( block, mnt )
if not result:
raise PermissionError( result.error )
hostname = mnt + 'etc' + 'hostname'
if hostname.exists:
hostname = hostname.open().read().strip()
hostname = hostname.replace( '-', '_' )
return hostname
logger.info( str( Umount()( mnt ) ) )
def _list( args ):
touhas = get_touhas( args )
for touha in touhas:
print( touha )
print_backups( touhas[ touha ] )
def print_backups( touha ):
backups = sorted(
touha.backups, key=lambda b: b.date, reverse=True )
for i, backup in enumerate( backups ):
print( '\t', f"{i}.-", backup.path.file_name )
def _backup( args ):
touhas = get_touhas( args )
spell_card = Spell_card( block=args.block, mount_path=args.backup_path, )
touha = touhas[ spell_card.name ]
touha.new_backup( args.block )
return
def _restore( args ):
touhas = get_touhas( args )
block = args.block
args.date = datetime.datetime.strptime( args.date, "%Y-%m-%d" )
if args.touha:
touha_name = args.touha
else:
spell_card = Spell_card( block=args.block, mount_path=args.backup_path, )
touha_name = spell_card.name
try:
touha = touhas[ touha_name ]
except KeyError as e:
logger.info( f"no se encontro la touha {e}" )
return
for backup in touha.backups:
if backup.date == args.date:
break
else:
logger.info( f"no se encontro un backup de la fecha f{args.date}" )
logger.info( f"backups validos para f{touha.name}" )
print_backups( touha )
return
logger.info(
f"iniciando restauracion del backup {backup.path} en {block}" )
backup.restore( block )
def _format( args ):
block = args.block
boot = f"{block}p1"
root = f"{block}p2"
Vfat( boot ).run()
Ext4( root ).run()
if args.version == "4":
image_url = Chibi_url(
'http://os.archlinuxarm.org/os/ArchLinuxARM-rpi-4-latest.tar.gz' )
else:
raise NotImplementedError(
f"la version de rasp {args.version} no esta implementada" )
image_path = args.backup_path + 'image'
if not image_path.exists:
image_path.mkdir()
image = image_path + image_url.base_name
if not image.exists:
image = image_url.download( path=image_path )
spell_card = Spell_card( block=args.block, mount_path=args.backup_path, )
Bsdtar( '-xpf', image, '-C', spell_card.root ).run()
tmp_boot = spell_card.root + 'boot' + '*'
tmp_boot.move( spell_card.boot )
def main():
parser = argparse.ArgumentParser(
"tool for backup and restore rasberry pi sd cards" )
parser.add_argument(
"--log_level", dest="log_level", default="INFO",
help="nivel de log", )
parser.add_argument(
"-b", "--backup", type=Chibi_path, default='.', dest="backup_path",
help="backup path" )
sub_parsers = parser.add_subparsers(
dest='command', help='sub-command help' )
parser_list = sub_parsers.add_parser( 'list', help='list the backups', )
parser_backup = sub_parsers.add_parser( 'backup', help='do a backup', )
parser_backup.add_argument(
'--block', '-b', required=True, type=Chibi_path, help='block' )
parser_restore = sub_parsers.add_parser( 'restore', help='do a restore', )
parser_restore.add_argument(
'--block', '-b', required=True, type=Chibi_path, help='block' )
parser_restore.add_argument(
'--touha', '-t', required=False, type=Chibi_path, help='touha' )
parser_restore.add_argument(
'--date', '-d', required=True, help='date' )
parser_format = sub_parsers.add_parser( 'format', help='do a format', )
parser_format.add_argument(
'--block', '-b', required=True, type=Chibi_path, help='block' )
parser_format.add_argument(
'--version', '-v', required=True, help='raspberry pi version' )
parser_mount = sub_parsers.add_parser( 'mount', help='mount the touha', )
parser_mount.add_argument(
'--block', '-b', required=True, type=Chibi_path, help='block' )
parser_umount = sub_parsers.add_parser( 'umount', help='umount the touha', )
parser_spell_card = sub_parsers.add_parser(
'spell_card', help='check the spell card', )
parser_spell_card.add_argument(
'--block', '-b', required=True, type=Chibi_path, help='block' )
spell_card_sub_parser = parser_spell_card.add_subparsers(
dest='spell_card_command', help='spell_card help' )
parser_spell_card_print = spell_card_sub_parser.add_parser(
'list', help='check the spell card', )
parser_spell_card_print.add_argument(
'--home', action="store_true", help='decide if is going to print home' )
parser_spell_card_clone = spell_card_sub_parser.add_parser(
'backup', help='backup spell card', )
parser_spell_card_clone.add_argument(
'--destination', '-d', default='.', type=Chibi_path,
        help='destination' )
parser_spell_card_clone.add_argument(
'--home', action="store_true", help='decide if is going to print home' )
parser_spell_card_restore = spell_card_sub_parser.add_parser(
'restore', help='backup spell card', )
parser_spell_card_restore.add_argument(
'--destination', '-d', default='.', type=Chibi_path,
        help='destination' )
parser_spell_card_restore.add_argument(
        '--touha', help='name of the touha to use' )
args = parser.parse_args()
    logging.basicConfig( level=args.log_level, format=logger_formatter )
if args.command == 'list':
_list( args )
elif args.command == 'backup':
_backup( args )
elif args.command == 'restore':
_restore( args )
elif args.command == 'format':
_format( args )
elif args.command == 'mount':
Spell_card(
block=args.block, mount_path=args.backup_path,
unmount_on_dead=False )
elif args.command == 'umount':
Spell_card( mount_path=args.backup_path, unmount_on_dead=True )
elif args.command == 'spell_card':
spell_card = Spell_card(
block=args.block, mount_path=args.backup_path,
unmount_on_dead=True )
if args.spell_card_command == "list":
spell_card.check_spell_card( home=args.home )
if args.spell_card_command == "backup":
spell_card.clone( path=args.destination, home=args.home, )
if args.spell_card_command == "restore":
spell_card.restore( path=args.destination, touha_name=args.touha, )
else:
logger.error(
"spell card commando no encontrado "
f"{args.spell_card_command}"
)
else:
logger.error( f"commando no encontrado {args.command}" )
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 31.596958 | 81 | 0.641155 |
83bef3f329e34b747b550cb3cf19898fd255acb4 | 138 | py | Python | Lesson12/Tuple&Hash.py | nittharatk/hello_python | 2beb257752ac968eb665ce17c5f003eb6b44333b | [
"MIT"
] | null | null | null | Lesson12/Tuple&Hash.py | nittharatk/hello_python | 2beb257752ac968eb665ce17c5f003eb6b44333b | [
"MIT"
] | null | null | null | Lesson12/Tuple&Hash.py | nittharatk/hello_python | 2beb257752ac968eb665ce17c5f003eb6b44333b | [
"MIT"
] | null | null | null | if name == 'main':
n = int(input())
integer_list = map(int, input().split())
t = tuple(integer_list)
print (hash(t)) | 23 | 45 | 0.543478 |
6005c89fb0bf02d136c0b611dc02f812c3b021c5 | 2,604 | py | Python | dace/registry.py | am-ivanov/dace | c35f0b3cecc04a2c9fb668bd42a72045891e7a42 | [
"BSD-3-Clause"
] | 1 | 2021-09-13T06:36:18.000Z | 2021-09-13T06:36:18.000Z | dace/registry.py | 1C4nfaN/dace | 4d65e0951c112160fe783766404a806b6043b521 | [
"BSD-3-Clause"
] | null | null | null | dace/registry.py | 1C4nfaN/dace | 4d65e0951c112160fe783766404a806b6043b521 | [
"BSD-3-Clause"
] | null | null | null | """ Contains class decorators to ease creating classes and enumerations whose
subclasses and values can be registered externally. """
from aenum import Enum, extend_enum
from copy import deepcopy
from typing import Dict, Type
def make_registry(cls: Type):
"""
Decorator that turns a class into a user-extensible class with three
class methods: ``register``, ``unregister``, and ``extensions``.
The first method accepts one class parameter and registers it into the
extensions, the second method removes the class parameter from the
registry, and the third method returns a list of currently-registered
extensions.
"""
def _register(cls: Type, subclass: Type, kwargs: Dict):
cls._registry_[subclass] = kwargs
def _unregister(cls: Type, subclass: Type):
del cls._registry_[subclass]
cls._registry_ = {}
cls.register = lambda subclass, **kwargs: _register(cls, subclass, kwargs)
cls.unregister = lambda subclass: _unregister(cls, subclass)
cls.extensions = lambda: cls._registry_
return cls
def autoregister(cls: Type, **kwargs):
"""
Decorator for subclasses of user-extensible classes (see ``make_registry``)
that automatically registers the subclass with the superclass registry upon
creation.
"""
registered = False
for base in cls.__bases__:
if hasattr(base, '_registry_') and hasattr(base, 'register'):
base.register(cls, **kwargs)
registered = True
if not registered:
raise TypeError('Class does not extend registry classes')
return cls
def autoregister_params(**params):
"""
Decorator for subclasses of user-extensible classes (see ``make_registry``)
that automatically registers the subclass with the superclass registry upon
creation. Uses the arguments given to register the value of the subclass.
"""
return lambda cls: autoregister(cls, **params)
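# Illustrative usage sketch of the two decorators above (the classes and the
# 'target' key are hypothetical, not part of dace itself):
#
#     @make_registry
#     class Backend:
#         pass
#
#     @autoregister_params(target='cpu')
#     class CPUBackend(Backend):
#         pass
#
#     assert Backend.extensions() == {CPUBackend: {'target': 'cpu'}}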
def extensible_enum(cls: Type):
"""
Decorator that adds a function called ``register`` to an enumeration,
extending its values. Note that new values cannot be unregistered.
New entries can be registered either with a single, string argument for
a new name (a value will be auto-assigned), or with additional arguments
for the value.
"""
if not issubclass(cls, Enum):
raise TypeError("Only aenum.Enum subclasses may be made extensible")
def _extend_enum(cls: Type, name: str, *value):
extend_enum(cls, name, *value)
cls.register = lambda name, *args: _extend_enum(cls, name, *args)
return cls
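# Illustrative sketch (hypothetical enum, not from the dace codebase):
#
#     @extensible_enum
#     class Color(Enum):
#         RED = 1
#
#     Color.register('BLUE', 2)
#     assert Color.BLUE.value == 2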
| 34.72 | 79 | 0.702765 |
38efb66bd664b8df8be1885ce0ad1908b5e5925a | 3,011 | py | Python | from_cpython/Lib/test/test_macostools.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-02-06T14:28:45.000Z | 2020-02-06T14:28:45.000Z | from_cpython/Lib/test/test_macostools.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | from_cpython/Lib/test/test_macostools.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-02-06T14:29:00.000Z | 2020-02-06T14:29:00.000Z | # expected: fail
# Copyright (C) 2003 Python Software Foundation
import unittest
import os
import sys
from test import test_support
MacOS = test_support.import_module('MacOS')
#The following modules should exist if MacOS exists.
import Carbon.File
import macostools
TESTFN2 = test_support.TESTFN + '2'
requires_32bit = unittest.skipUnless(sys.maxint < 2**32, '32-bit only test')
class TestMacostools(unittest.TestCase):
def setUp(self):
fp = open(test_support.TESTFN, 'w')
fp.write('hello world\n')
fp.close()
rfp = MacOS.openrf(test_support.TESTFN, '*wb')
rfp.write('goodbye world\n')
rfp.close()
def tearDown(self):
test_support.unlink(test_support.TESTFN)
test_support.unlink(TESTFN2)
def compareData(self):
fp = open(test_support.TESTFN, 'r')
data1 = fp.read()
fp.close()
fp = open(TESTFN2, 'r')
data2 = fp.read()
fp.close()
if data1 != data2:
return 'Data forks differ'
rfp = MacOS.openrf(test_support.TESTFN, '*rb')
data1 = rfp.read(1000)
rfp.close()
rfp = MacOS.openrf(TESTFN2, '*rb')
data2 = rfp.read(1000)
rfp.close()
if data1 != data2:
return 'Resource forks differ'
return ''
def test_touched(self):
# This really only tests that nothing unforeseen happens.
with test_support.check_warnings(('macostools.touched*',
DeprecationWarning), quiet=True):
macostools.touched(test_support.TESTFN)
@requires_32bit
def test_copy(self):
test_support.unlink(TESTFN2)
macostools.copy(test_support.TESTFN, TESTFN2)
self.assertEqual(self.compareData(), '')
@requires_32bit
def test_mkalias(self):
test_support.unlink(TESTFN2)
macostools.mkalias(test_support.TESTFN, TESTFN2)
fss, _, _ = Carbon.File.ResolveAliasFile(TESTFN2, 0)
self.assertEqual(fss.as_pathname(), os.path.realpath(test_support.TESTFN))
@requires_32bit
# If the directory doesn't exist, then chances are this is a new
# install of Python so don't create it since the user might end up
# running ``sudo make install`` and creating the directory here won't
# leave it with the proper permissions.
@unittest.skipUnless(os.path.exists(sys.prefix),
"%r doesn't exist" % sys.prefix)
def test_mkalias_relative(self):
test_support.unlink(TESTFN2)
macostools.mkalias(test_support.TESTFN, TESTFN2, sys.prefix)
fss, _, _ = Carbon.File.ResolveAliasFile(TESTFN2, 0)
self.assertEqual(fss.as_pathname(), os.path.realpath(test_support.TESTFN))
def test_main():
# Skip on wide unicode
if len(u'\0'.encode('unicode-internal')) == 4:
raise unittest.SkipTest("test_macostools is broken in USC4")
test_support.run_unittest(TestMacostools)
if __name__ == '__main__':
test_main()
| 32.031915 | 82 | 0.645633 |
310a6a50058ef42fc9df9d6390b1ae0514fb4a1f | 11,965 | py | Python | pysuite/drive.py | staftermath/pysuite | 01bc63460b1e3d5d7507151230cc2af4731c362a | [
"BSD-3-Clause"
] | 2 | 2020-10-15T04:05:56.000Z | 2022-01-20T01:25:24.000Z | pysuite/drive.py | staftermath/pysuite | 01bc63460b1e3d5d7507151230cc2af4731c362a | [
"BSD-3-Clause"
] | 17 | 2020-08-29T03:10:15.000Z | 2021-11-06T17:59:15.000Z | pysuite/drive.py | staftermath/pysuite | 01bc63460b1e3d5d7507151230cc2af4731c362a | [
"BSD-3-Clause"
] | null | null | null | """implement api to access google drive
"""
import logging
from pathlib import PosixPath, Path
from typing import Union, Optional, List
import re
from googleapiclient.discovery import Resource
from googleapiclient.http import MediaIoBaseDownload, MediaFileUpload
from pysuite.utilities import retry_on_out_of_quota, MAX_RETRY_ATTRIBUTE, SLEEP_ATTRIBUTE
class Drive:
"""Class to interact with Google Drive API
:param service: an authorized Google Drive service client.
:param max_retry: max number of retry on quota exceeded error. if 0 or less, no retry will be attempted.
:param sleep: base number of seconds between retries. the sleep time is exponentially increased after each retry.
"""
def __init__(self, service: Resource, max_retry: int=0, sleep: int=5):
self._service = service
setattr(self, MAX_RETRY_ATTRIBUTE, max_retry)
setattr(self, SLEEP_ATTRIBUTE, sleep)
@retry_on_out_of_quota()
def download(self, id: str, to_file: Union[str, PosixPath]):
"""download the google drive file with the requested id to target local file.
:param id: id of the google drive file
:param to_file: local file path
:return: None
"""
request = self._service.files().get_media(fileId=id)
with open(to_file, 'wb') as fh:
downloader = MediaIoBaseDownload(fh, request)
done = False
while not done:
status, done = downloader.next_chunk()
logging.info(f"Download {status.progress()*100}%")
@retry_on_out_of_quota()
def upload(self, from_file: Union[str, PosixPath], name: Optional[str]=None, mimetype: Optional[str]=None,
parent_id: Optional[str]=None) -> str:
"""upload local file to gdrive.
:param from_file: path to local file.
:param name: name of google drive file. If None, the name of local file will be used.
:param mimetype: Mime-type of the file. If None then a mime-type will be guessed from the file extension.
:param parent_id: id of the folder you want to upload the file to. If None, it will be uploaded to
root of Google drive.
:return: id of the uploaded file
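
        :example: a minimal sketch; assumes an authorized ``Drive`` instance and a made-up folder id.

        >>> drive.upload("./report.pdf", parent_id="1AbCdEf")  # returns the id of the new file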
"""
file_metadata = {'name': name if name is not None else Path(from_file).name}
if parent_id is not None:
file_metadata["parents"] = parent_id
media = MediaFileUpload(str(from_file),
mimetype=mimetype,
resumable=True)
file = self._service.files().create(body=file_metadata,
media_body=media,
fields='id').execute()
return file.get("id")
@retry_on_out_of_quota()
def update(self, id: str, from_file: Union[str, PosixPath]):
"""update the Google drive with local file.
:param id: id of the Google drive file to be updated
:param from_file: path to local file.
:return: None
"""
media = MediaFileUpload(str(from_file),
resumable=True)
self._service.files().update(body=dict(), fileId=id, media_body=media).execute()
@retry_on_out_of_quota()
def get_id(self, name: str, parent_id: Optional[str]=None):
"""get the id of the file with specified name. if more than one file are found, an error will be raised.
:param name: name of the file to be searched.
:param parent_id: id of the folder to limit the search. If None, the full Google drive will be searched.
:return: the id of the file if found. Or None if no such name is found.
"""
q = f"name = '{name}' and trashed = false"
if parent_id is not None:
q += f" and '{parent_id}' in parents"
response = self._service.files().list(pageSize=10,
fields=self._get_fields_query_string(),
q=q).execute()
item = response.get('files', None)
if item is None:
return None
if len(item) > 1:
raise RuntimeError(f"More than one file is found. Please rename the file with a unique string.")
return item[0]['id']
@retry_on_out_of_quota()
def find(self, name_contains: Optional[str]=None, name_not_contains: Optional[str]=None,
parent_id: Optional[str]=None) -> list:
"""find all files whose name contain specified string and do not contain specified string. Note that Google
API has unexpected behavior when searching for strings in name. It is can only search first 26 character. In
addition, it seems to search from first alphabetic character and Assume there are the following files:
'positive_a', 'positive_b', 'a', '_a', 'ba'
:example:
>>> self.find(name_contains='a') # this finds only 'a' and '_a', not 'positive_a' or 'ba'
:param name_contains: a string contained in the name
:param name_not_contains: a string that is not contained in the name
:param parent_id: parent folder id
:return: a list of dictionaries containing id and name of found files.
"""
if name_contains is None and name_not_contains is None:
raise ValueError("name_contains and name_not_contains cannot both be None")
q_name_contains = ""
q_name_not_contains = ""
if name_contains is not None:
q_name_contains = f"and name contains '{name_contains}'"
if name_not_contains is not None:
q_name_not_contains = f"and not name contains '{name_not_contains}'"
q = f"trashed = false {q_name_contains} {q_name_not_contains}"
if parent_id is not None:
q += f" and '{parent_id}' in parents"
response = self._service.files().list(pageSize=100,
fields=self._get_fields_query_string(),
q=q).execute()
item = response.get('files', [])
return item
@retry_on_out_of_quota()
def list(self, id: str, regex: str=None, recursive: bool=False, depth: int=3) -> list:
"""list the content of the folder by the given id.
:param id: id of the folder to be listed.
        :param regex: a regular expression used to filter returned files and folders.
        :param recursive: if True, children of the folder will also be listed.
        :param depth: number of recursion levels if recursive is True. This is to prevent cyclic nesting or deeply nested folders.
:return: a list of dictionaries containing id, name of the object contained in the target folder and list of
parent ids.
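
        :example: illustrative; the folder id and pattern are made up.

        >>> drive.list(id="1AbCdEf", regex=r".*\.csv", recursive=True)  # -> [{'id': ..., 'name': ..., 'parents': [...]}, ...]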
"""
q = f"'{id}' in parents and trashed = false"
result = []
page_token = "" # place holder to start the loop
while page_token is not None:
response = self._service.files().list(q=q,
spaces='drive',
fields=self._get_fields_query_string(["id", "name", "parents"]),
pageToken=page_token).execute()
result.extend(response.get("files", []))
page_token = response.get("nextPageToken", None)
if recursive and depth > 0:
for file in result:
children_id = file["id"]
result.extend(self.list(id=children_id, recursive=True, depth=depth-1))
if regex is not None:
pattern = re.compile(regex)
result = [f for f in result if pattern.match(f["name"])]
return result
@retry_on_out_of_quota()
def delete(self, id: str, recursive: bool=False):
"""delete target file from google drive
TODO: implement recursive delete
:param id: id of target object.
:param recursive: if True and target id represents a folder, remove all nested files and folders.
:return: None
"""
self._service.files().delete(fileId=id).execute()
@retry_on_out_of_quota()
def create_folder(self, name: str, parent_ids: Optional[list]=None) -> str:
"""create a folder on google drive by the given name.
:param name: name of the folder to be created.
:param parent_ids: list of ids where you want to create your folder in.
:return: id of the created folder.
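
        :example: illustrative; the parent id is made up.

        >>> folder_id = drive.create_folder("backups", parent_ids=["1AbCdEf"])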
"""
file_metadata = {
'name': name,
'mimeType': 'application/vnd.google-apps.folder'
}
if parent_ids is not None:
if not isinstance(parent_ids, list):
raise TypeError(f"parent_ids must be a list. got {type(parent_ids)}")
if len(parent_ids) == 0:
raise ValueError(f"parent_ids cannot be empty")
file_metadata["parents"] = parent_ids
folder = self._service.files().create(body=file_metadata, fields='id').execute()
return folder.get("id")
@retry_on_out_of_quota()
def share(self, id: str, emails: List[str], role: str= "reader", notify=True): # pragma: no cover
"""modify the permission of the target object and share with the provided emails.
:param id: id of target object.
:param emails: list of emails to be shared with.
        :param role: type of permission. Accepted values are: 'owner', 'organizer', 'fileOrganizer', 'writer',
            'commenter' and 'reader'.
        :param notify: whether to notify the emails about the sharing.
:return: name of the object shared.
"""
call_back = None
batch = self._service.new_batch_http_request(callback=call_back)
for email in emails:
user_permission = {
"type": "user",
"role": role,
"emailAddress": email
}
batch.add(self._service.permissions().create(
fileId=id,
body=user_permission,
fields='id',
sendNotification=notify
))
batch.execute()
return self.get_name(id)
def _get_fields_query_string(self, fields: Optional[list]=None) -> str:
"""create a string used to query gdrive object and return requested fields.
:param fields: list of fields to be returned in query.
:return: a string used to query gdrive. only usable in `fields` arguments in list()
"""
if fields is None:
fields = ["id", "name"]
if not isinstance(fields, list):
raise TypeError(f"fields must be a list. got {type(fields)}")
if len(fields) == 0:
raise ValueError("fields cannot be empty")
return f"nextPageToken, files({','.join(fields)})"
@retry_on_out_of_quota()
def get_name(self, id: str) -> str:
"""get the name of the Google drive object.
:param id: id of the target Google drive object
:return: name of the object
"""
file = self._service.files().get(fileId=id).execute()
return file['name']
@retry_on_out_of_quota()
def copy(self, id: str, name: str, parent_id: Optional[str]=None) -> str:
"""copy target file and give the new file specified name. return the id of the created file.
:param id: target file to be copied.
:param name: name of the new file.
:param parent_id: the id of the folder where the new file is placed in. If None, the file will be placed in
Google Drive root.
:return: id of the created new file.
"""
request = {"name": name}
if parent_id is not None:
request["parents"] = parent_id
file = self._service.files().copy(fileId=id, body=request, fields='id').execute()
return file.get("id")
| 42.429078 | 121 | 0.6056 |
3087469891a220ce518dcc36f8f588086051f9ed | 1,414 | py | Python | aliyun-python-sdk-push/aliyunsdkpush/request/v20160801/CheckCertificateRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-push/aliyunsdkpush/request/v20160801/CheckCertificateRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-push/aliyunsdkpush/request/v20160801/CheckCertificateRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkpush.endpoint import endpoint_data
class CheckCertificateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Push', '2016-08-01', 'CheckCertificate')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AppKey(self):
return self.get_query_params().get('AppKey')
def set_AppKey(self,AppKey):
self.add_query_param('AppKey',AppKey) | 37.210526 | 74 | 0.765912 |
27d859d6d6fd05744007b20bc7ba0c3d1ceeb302 | 688 | py | Python | crescent/resources/s3/bucket/metrics_configuration.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | 1 | 2020-03-26T19:20:03.000Z | 2020-03-26T19:20:03.000Z | crescent/resources/s3/bucket/metrics_configuration.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | null | null | null | crescent/resources/s3/bucket/metrics_configuration.py | mpolatcan/zepyhrus | 2fd0b1b9b21613b5876a51fe8b5f9e3afbec1b67 | [
"Apache-2.0"
] | null | null | null | from crescent.core import Model
from crescent.functions import AnyFn
from .tag_filter import TagFilter
from .constants import ModelRequiredProperties
from typing import Union
class MetricsConfiguration(Model):
def __init__(self):
super(MetricsConfiguration, self).__init__(required_properties=ModelRequiredProperties.METRICS_CONFIGURATION)
def Id(self, id: Union[str, AnyFn]):
return self._set_field(self.Id.__name__, id)
def Prefix(self, prefix: Union[str, AnyFn]):
return self._set_field(self.Prefix.__name__, prefix)
def TagFilters(self, *tag_filters: TagFilter):
return self._set_field(self.TagFilters.__name__, list(tag_filters))
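# Illustrative builder-style usage (values are made up; assumes _set_field
# returns self for chaining, as the method bodies above suggest):
#
#     config = MetricsConfiguration().Id("EntireBucket").Prefix("logs/")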
| 34.4 | 117 | 0.764535 |
7b430b7bcaada0d4029e72b581a4aec6e9dfe206 | 9,425 | py | Python | create_figures/kombiplot_systems_i,v,vi.py | hippke/TTV-TDV-exomoons | af1fbcbd257d0c8f873cd1d388100d669f3e2f78 | [
"MIT"
] | 1 | 2019-08-01T09:46:13.000Z | 2019-08-01T09:46:13.000Z | create_figures/kombiplot_systems_i,v,vi.py | hippke/TTV-TDV-exomoons | af1fbcbd257d0c8f873cd1d388100d669f3e2f78 | [
"MIT"
] | null | null | null | create_figures/kombiplot_systems_i,v,vi.py | hippke/TTV-TDV-exomoons | af1fbcbd257d0c8f873cd1d388100d669f3e2f78 | [
"MIT"
] | null | null | null | """n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 1000
timestep = seconds_per_day / timesteps_per_day
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
# Update velocities based upon on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
# Move resulting data from lists to numpy arrays
    ttv_array = numpy.array(ttv_list)
    tdv_array = numpy.array(tdv_list)
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
ttv_array = numpy.divide(ttv_array, 1000) # km/s
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print(transit_duration)
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = a_io
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = a_eur
# Calculate start velocities
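# Vis-viva equation v^2 = G*M*(2/r - 1/a); with a = r for a circular orbit the
# bracket reduces to 1/r, i.e. v = sqrt(G*M/r).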
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *secondmoon.px ** 3) / (G * (secondmoon.mass + planet.mass)))
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon])
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparison with eccentric moon
plt.xlim(-0.4, +0.4)
plt.ylim(-1, +1)
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
tdv_list = []
ttv_list = []
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = a_io
secondmoon = Body()
secondmoon.mass = 2*M_gan
secondmoon.px = a_eur
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *secondmoon.px ** 3) / (G * (secondmoon.mass + planet.mass)))
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon])
plt.plot(ttv_array, tdv_array, color = 'b', linestyle=':')
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
tdv_list = []
ttv_list = []
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = a_io
secondmoon = Body()
secondmoon.mass = 3*M_gan
secondmoon.px = a_eur
# Calculate start velocities
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
# Calculate planet displacement. This holds for circular orbits
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *secondmoon.px ** 3) / (G * (secondmoon.mass + planet.mass)))
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon])
plt.plot(ttv_array, tdv_array, 'r--')
plt.savefig("fig_combi_systems_i,v,vi.eps", bbox_inches = 'tight')
| 33.781362 | 185 | 0.669814 |
d8fd51a36c5482f9c76fec1ab25ca9d3843e21d6 | 3,155 | py | Python | kamrecsys/datasets/tests/test_flixster.py | tkamishima/kamrecsys | 62305312f7aaaa8c2985785983c1d2bf68b243c0 | [
"MIT"
] | 7 | 2016-11-01T13:19:16.000Z | 2022-02-23T17:48:49.000Z | kamrecsys/datasets/tests/test_flixster.py | tkamishima/kamrecsys | 62305312f7aaaa8c2985785983c1d2bf68b243c0 | [
"MIT"
] | 1 | 2017-09-03T13:08:48.000Z | 2017-09-20T04:32:55.000Z | kamrecsys/datasets/tests/test_flixster.py | tkamishima/kamrecsys | 62305312f7aaaa8c2985785983c1d2bf68b243c0 | [
"MIT"
] | 6 | 2016-04-21T16:13:23.000Z | 2022-02-25T00:58:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (
print_function,
division,
absolute_import)
from six.moves import xrange
# =============================================================================
# Imports
# =============================================================================
from numpy.testing import (
TestCase,
run_module_suite,
assert_,
assert_allclose,
assert_array_almost_equal_nulp,
assert_array_max_ulp,
assert_array_equal,
assert_array_less,
assert_equal,
assert_raises,
assert_raises_regex,
assert_warns,
assert_string_equal)
import numpy as np
# =============================================================================
# Module variables
# =============================================================================
# =============================================================================
# Functions
# =============================================================================
# =============================================================================
# Test Classes
# =============================================================================
class TestFlixsterClass(TestCase):
def test_load_flixster_rating(self):
from kamrecsys.datasets import load_flixster_rating
data = load_flixster_rating()
assert_array_equal(
sorted(data.__dict__.keys()),
sorted(['event_otypes', 'n_otypes', 'n_events', 'n_score_levels',
'feature', 'event', 'iid', 'event_feature',
'score', 'eid', 'n_objects', 's_event', 'score_domain']))
assert_array_equal(data.event_otypes, [0, 1])
assert_equal(data.n_otypes, 2)
assert_equal(data.n_events, 8196077)
assert_equal(data.s_event, 2)
assert_array_equal(data.n_objects, [147612, 48794])
# events
assert_array_equal(data.score_domain, [0.5, 5.0, 0.5])
assert_array_equal(
data.event[:5],
[
[124545, 57], [124545, 665], [124545, 969],
[124545, 1650], [124545, 2230]
]
)
assert_array_equal(
data.event[-5:],
[
[14217, 28183], [14217, 36255], [14217, 37636],
[14217, 40326], [14217, 48445]
]
)
assert_array_equal(data.eid[0][:5],
[6, 7, 8, 9, 11])
assert_array_equal(data.eid[0][-5:],
[1049477, 1049489, 1049491, 1049494, 1049508])
assert_array_equal(data.eid[1][:5],
[1, 2, 3, 4, 5])
assert_array_equal(data.eid[1][-5:],
[66712, 66714, 66718, 66725, 66726])
assert_array_equal(data.score[:5], [1.5, 1.0, 2.0, 1.0, 5.0])
assert_array_equal(data.score[-5:], [5.0, 4.0, 3.0, 4.0, 5.0])
# =============================================================================
# Main Routines
# =============================================================================
if __name__ == '__main__':
run_module_suite()
| 33.56383 | 79 | 0.422504 |
61c59e514e4a4857d77be8edc7252999bae6334e | 6,032 | py | Python | spleeter/utils/tensor.py | ogut77/spleeter3 | fb900a8eea57a5a286acf2bf257e66a70db50f2f | [
"MIT"
] | null | null | null | spleeter/utils/tensor.py | ogut77/spleeter3 | fb900a8eea57a5a286acf2bf257e66a70db50f2f | [
"MIT"
] | null | null | null | spleeter/utils/tensor.py | ogut77/spleeter3 | fb900a8eea57a5a286acf2bf257e66a70db50f2f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf8
""" Utility function for tensorflow. """
# pylint: disable=import-error
import tensorflow as tf
import pandas as pd
# pylint: enable=import-error
__email__ = 'spleeter@deezer.com'
__author__ = 'Deezer Research'
__license__ = 'MIT License'
def sync_apply(tensor_dict, func, concat_axis=1):
""" Return a function that applies synchronously the provided func on the
provided dictionnary of tensor. This means that func is applied to the
concatenation of the tensors in tensor_dict. This is useful for performing
random operation that needs the same drawn value on multiple tensor, such
as a random time-crop on both input data and label (the same crop should be
applied to both input data and label, so random crop cannot be applied
separately on each of them).
IMPORTANT NOTE: all tensor are assumed to be the same shape.
Params:
- tensor_dict: dictionary (key: strings, values: tf.tensor)
          a dictionary of tensors.
- func: function
function to be applied to the concatenation of the tensors in
tensor_dict
- concat_axis: int
The axis on which to perform the concatenation.
Returns:
processed tensors dictionary with the same name (keys) as input
tensor_dict.
"""
if concat_axis not in {0, 1}:
raise NotImplementedError(
'Function only implemented for concat_axis equal to 0 or 1')
tensor_list = list(tensor_dict.values())
concat_tensor = tf.concat(tensor_list, concat_axis)
processed_concat_tensor = func(concat_tensor)
tensor_shape = tf.shape(input=list(tensor_dict.values())[0])
D = tensor_shape[concat_axis]
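    # Slice the processed concatenation back into one block of length D per
    # input key, along the concatenation axis.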
if concat_axis == 0:
return {
name: processed_concat_tensor[index * D:(index + 1) * D, :, :]
for index, name in enumerate(tensor_dict)
}
return {
name: processed_concat_tensor[:, index * D:(index + 1) * D, :]
for index, name in enumerate(tensor_dict)
}
def from_float32_to_uint8(
tensor,
tensor_key='tensor',
min_key='min',
max_key='max'):
"""
    :param tensor: float32 tensor to quantize.
    :param tensor_key: key under which the quantized tensor is stored.
    :param min_key: key under which the original minimum is stored.
    :param max_key: key under which the original maximum is stored.
    :returns: dict with the uint8 tensor and its original min and max.
"""
tensor_min = tf.reduce_min(input_tensor=tensor)
tensor_max = tf.reduce_max(input_tensor=tensor)
return {
tensor_key: tf.cast(
(tensor - tensor_min) / (tensor_max - tensor_min + 1e-16)
* 255.9999, dtype=tf.uint8),
min_key: tensor_min,
max_key: tensor_max
}
def from_uint8_to_float32(tensor, tensor_min, tensor_max):
"""
    :param tensor: uint8 tensor to dequantize.
    :param tensor_min: minimum of the original float32 tensor.
    :param tensor_max: maximum of the original float32 tensor.
    :returns: approximately restored float32 tensor.
"""
return (
tf.cast(tensor, tf.float32)
* (tensor_max - tensor_min)
/ 255.9999 + tensor_min)
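# Illustrative round trip for the two quantization helpers above (variable
# names are assumptions, not part of spleeter's API):
#
#     packed = from_float32_to_uint8(spectrogram)
#     restored = from_uint8_to_float32(
#         packed['tensor'], packed['min'], packed['max'])
#     # restored ~= spectrogram, up to 8-bit quantization error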
def pad_and_partition(tensor, segment_len):
""" Pad and partition a tensor into segment of len segment_len
along the first dimension. The tensor is padded with 0 in order
to ensure that the first dimension is a multiple of segment_len.
Tensor must be of known fixed rank
:Example:
>>> tensor = [[1, 2, 3], [4, 5, 6]]
>>> segment_len = 2
>>> pad_and_partition(tensor, segment_len)
[[[1, 2], [4, 5]], [[3, 0], [6, 0]]]
:param tensor:
:param segment_len:
:returns:
"""
tensor_size = tf.math.floormod(tf.shape(input=tensor)[0], segment_len)
pad_size = tf.math.floormod(segment_len - tensor_size, segment_len)
padded = tf.pad(
tensor=tensor,
paddings=[[0, pad_size]] + [[0, 0]] * (len(tensor.shape)-1))
split = (tf.shape(input=padded)[0] + segment_len - 1) // segment_len
return tf.reshape(
padded,
tf.concat(
[[split, segment_len], tf.shape(input=padded)[1:]],
axis=0))
def pad_and_reshape(instr_spec, frame_length, F):
"""
    :param instr_spec: instrument spectrogram tensor.
    :param frame_length: STFT frame length.
    :param F: number of frequency bins kept in the spectrogram.
    :returns: zero-padded and reshaped spectrogram.
"""
spec_shape = tf.shape(input=instr_spec)
extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1]))
n_extra_row = (frame_length) // 2 + 1 - F
extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])
extended_spec = tf.concat([instr_spec, extension], axis=2)
old_shape = tf.shape(input=extended_spec)
new_shape = tf.concat([
[old_shape[0] * old_shape[1]],
old_shape[2:]],
axis=0)
processed_instr_spec = tf.reshape(extended_spec, new_shape)
return processed_instr_spec
def dataset_from_csv(csv_path, **kwargs):
""" Load dataset from a CSV file using Pandas. kwargs if any are
forwarded to the `pandas.read_csv` function.
:param csv_path: Path of the CSV file to load dataset from.
:returns: Loaded dataset.
"""
df = pd.read_csv(csv_path, **kwargs)
dataset = (
tf.data.Dataset.from_tensor_slices(
{key: df[key].values for key in df})
)
return dataset
def check_tensor_shape(tensor_tf, target_shape):
""" Return a Tensorflow boolean graph that indicates whether
sample[features_key] has the specified target shape. Only check
not None entries of target_shape.
:param tensor_tf: Tensor to check shape for.
:param target_shape: Target shape to compare tensor to.
:returns: True if shape is valid, False otherwise (as TF boolean).
"""
result = tf.constant(True)
for i, target_length in enumerate(target_shape):
if target_length:
result = tf.logical_and(
result,
tf.equal(tf.constant(target_length), tf.shape(input=tensor_tf)[i]))
return result
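# Illustrative sketch (hypothetical tensor; None entries are ignored):
#
#     spec = tf.zeros((2, 512, 1024, 2))
#     ok = check_tensor_shape(spec, (None, 512, 1024, None))  # boolean tensor, True here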
def set_tensor_shape(tensor, tensor_shape):
""" Set shape for a tensor (not in place, as opposed to tf.set_shape)
:param tensor: Tensor to reshape.
:param tensor_shape: Shape to apply to the tensor.
:returns: A reshaped tensor.
"""
    # NOTE: set_shape actually mutates the tensor's static shape in place,
    # despite what the docstring says.
tensor.set_shape(tensor_shape)
return tensor
| 31.416667 | 83 | 0.649867 |
bb9dbb2431c5d730ca2ef5edf8e94dcef7d63fd8 | 2,242 | py | Python | katcp/compat.py | cnb0/katcp-python | 35c860bc17ee6404cc59a14f7d1b8ac1fae4b73c | [
"BSD-3-Clause"
] | 8 | 2015-02-25T20:13:54.000Z | 2019-09-12T06:12:07.000Z | katcp/compat.py | cnb0/katcp-python | 35c860bc17ee6404cc59a14f7d1b8ac1fae4b73c | [
"BSD-3-Clause"
] | 67 | 2015-01-12T09:58:36.000Z | 2021-05-12T14:23:26.000Z | katcp/compat.py | cnb0/katcp-python | 35c860bc17ee6404cc59a14f7d1b8ac1fae4b73c | [
"BSD-3-Clause"
] | 15 | 2015-04-28T13:18:28.000Z | 2021-01-19T16:16:33.000Z | # katcp.py
# -*- coding: utf8 -*-
# vim:fileencoding=utf8 ai ts=4 sts=4 et sw=4
# Copyright 2019 National Research Foundation (South African Radio Astronomy Observatory)
# BSD license - see LICENSE for details
"""Utilities for dealing with Python 2 and 3 compatibility."""
from __future__ import absolute_import, division, print_function
from future import standard_library
standard_library.install_aliases() # noqa: E402
import builtins
import future
if future.utils.PY2:
def ensure_native_str(value):
"""Coerce unicode string or bytes to native string type (UTF-8 encoding)."""
if isinstance(value, str):
return value
elif isinstance(value, unicode):
return value.encode('utf-8')
else:
raise TypeError(
"Invalid type for string conversion: {}".format(type(value)))
else:
def ensure_native_str(value):
"""Coerce unicode string or bytes to native string type (UTF-8 encoding)."""
if isinstance(value, str):
return value
elif isinstance(value, bytes):
return value.decode('utf-8')
else:
raise TypeError(
"Invalid type for string conversion: {}".format(type(value)))
def byte_chars(byte_string):
"""Return list of characters from a byte string (PY3-compatible).
In PY2, `list(byte_string)` works fine, but in PY3, this returns
each element as an int instead of single character byte string.
Slicing is used instead to get the individual characters.
Parameters
----------
byte_string : bytes
Byte string to be split into characters.
Returns
-------
chars : list
The individual characters, each as a byte string.
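
    Examples
    --------
    Illustrative (PY3 repr shown; PY2 prints plain strings):

    >>> byte_chars(b'abc')
    [b'a', b'b', b'c']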
"""
return [byte_string[i:i+1] for i in range(len(byte_string))]
def is_bytes(value):
"""Indicate if object is bytes-like.
future.utils.isbytes is deprecated, so re-implementing, as per their
recommendation.
"""
return isinstance(value, builtins.bytes)
def is_text(value):
"""Indicate if object is text-like.
future.utils.istext is deprecated, so re-implementing, as per their
recommendation.
"""
return isinstance(value, builtins.str)
| 29.893333 | 89 | 0.664585 |
7200d3e3a5f15df58f1a78fd401d2cf75df6735b | 6,386 | py | Python | test/test_sersic_lens.py | LBJ-Wade/astrofunc_lensing_profile | d2223705bc44d07575a5e93291375ab8e69ebfa8 | [
"MIT"
] | null | null | null | test/test_sersic_lens.py | LBJ-Wade/astrofunc_lensing_profile | d2223705bc44d07575a5e93291375ab8e69ebfa8 | [
"MIT"
] | null | null | null | test/test_sersic_lens.py | LBJ-Wade/astrofunc_lensing_profile | d2223705bc44d07575a5e93291375ab8e69ebfa8 | [
"MIT"
] | null | null | null | __author__ = 'sibirrer'
import astrofunc.LensingProfiles.calc_util as calc_util
from astrofunc.LensingProfiles.sersic import Sersic
from astrofunc.LightProfiles.sersic import Sersic as Sersic_light
import numpy as np
import pytest
import numpy.testing as npt
class TestSersic(object):
"""
    tests the Sersic lens profile methods
"""
def setup(self):
self.sersic = Sersic()
self.sersic_light = Sersic_light()
def test_function(self):
x = 1
y = 2
n_sersic = 2.
r_eff = 1.
k_eff = 0.2
values = self.sersic.function(x, y, n_sersic, r_eff, k_eff)
npt.assert_almost_equal(values, 1.0272982586319199, decimal=10)
x = np.array([0])
y = np.array([0])
values = self.sersic.function(x, y, n_sersic, r_eff, k_eff)
npt.assert_almost_equal(values[0], 0., decimal=10)
x = np.array([2,3,4])
y = np.array([1,1,1])
values = self.sersic.function(x, y, n_sersic, r_eff, k_eff)
npt.assert_almost_equal(values[0], 1.0272982586319199, decimal=10)
npt.assert_almost_equal(values[1], 1.3318743892966658, decimal=10)
npt.assert_almost_equal(values[2], 1.584299393114988, decimal=10)
def test_derivatives(self):
x = np.array([1])
y = np.array([2])
n_sersic = 2.
r_eff = 1.
k_eff = 0.2
f_x, f_y = self.sersic.derivatives(x, y, n_sersic, r_eff, k_eff)
assert f_x[0] == 0.16556078301997193
assert f_y[0] == 0.33112156603994386
x = np.array([0])
y = np.array([0])
f_x, f_y = self.sersic.derivatives(x, y, n_sersic, r_eff, k_eff)
assert f_x[0] == 0
assert f_y[0] == 0
x = np.array([1,3,4])
y = np.array([2,1,1])
values = self.sersic.derivatives(x, y, n_sersic, r_eff, k_eff)
assert values[0][0] == 0.16556078301997193
assert values[1][0] == 0.33112156603994386
assert values[0][1] == 0.2772992378623737
assert values[1][1] == 0.092433079287457892
    def test_differentials(self):
x_, y_ = 1., 1
n_sersic = 2.
r_eff = 1.
k_eff = 0.2
r = np.sqrt(x_**2 + y_**2)
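        # Cross-check (editorial note): the explicit chain-rule expansion below
        # (f_xx_, f_yy_, f_xy_) and the compact polar forms (f_xx, f_yy, f_xy)
        # are two algebraically equivalent ways of writing the Hessian of a
        # radially symmetric deflection field.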
d_alpha_dr = self.sersic.d_alpha_dr(x_, y_, n_sersic, r_eff, k_eff)
alpha = self.sersic.alpha_abs(x_, y_, n_sersic, r_eff, k_eff)
f_xx_ = d_alpha_dr * calc_util.d_r_dx(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dx(x_, y_)
f_yy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * y_/r + alpha * calc_util.d_y_diffr_dy(x_, y_)
f_xy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dy(x_, y_)
f_xx = (d_alpha_dr/r - alpha/r**2) * y_**2/r + alpha/r
f_yy = (d_alpha_dr/r - alpha/r**2) * x_**2/r + alpha/r
f_xy = (d_alpha_dr/r - alpha/r**2) * x_*y_/r
npt.assert_almost_equal(f_xx, f_xx_, decimal=10)
npt.assert_almost_equal(f_yy, f_yy_, decimal=10)
npt.assert_almost_equal(f_xy, f_xy_, decimal=10)
def test_hessian(self):
x = np.array([1])
y = np.array([2])
n_sersic = 2.
r_eff = 1.
k_eff = 0.2
        f_xx, f_yy, f_xy = self.sersic.hessian(x, y, n_sersic, r_eff, k_eff)
        assert f_xx[0] == 0.1123170666045793
        npt.assert_almost_equal(f_yy[0], -0.047414082641598576, decimal=10)
        npt.assert_almost_equal(f_xy[0], -0.10648743283078525, decimal=10)
x = np.array([1,3,4])
y = np.array([2,1,1])
values = self.sersic.hessian(x, y, n_sersic, r_eff, k_eff)
assert values[0][0] == 0.1123170666045793
npt.assert_almost_equal(values[1][0], -0.047414082641598576, decimal=10)
        npt.assert_almost_equal(values[2][0], -0.10648743283078525, decimal=10)
npt.assert_almost_equal(values[0][1], -0.053273787681591328, decimal=10)
npt.assert_almost_equal(values[1][1], 0.076243427402007985, decimal=10)
npt.assert_almost_equal(values[2][1], -0.048568955656349749, decimal=10)
def test_alpha_abs(self):
x = 1.
dr = 0.0000001
n_sersic = 2.5
r_eff = .5
k_eff = 0.2
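        # Editorial note: this checks alpha_abs against a forward finite
        # difference of the potential, alpha ~= -(psi(x + dr) - psi(x)) / dr.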
alpha_abs = self.sersic.alpha_abs(x, 0, n_sersic, r_eff, k_eff)
f_dr = self.sersic.function(x + dr, 0, n_sersic, r_eff, k_eff)
f_ = self.sersic.function(x, 0, n_sersic, r_eff, k_eff)
alpha_abs_num = -(f_dr - f_)/dr
npt.assert_almost_equal(alpha_abs_num, alpha_abs, decimal=3)
def test_dalpha_dr(self):
x = 1.
dr = 0.0000001
n_sersic = 1.
r_eff = .5
k_eff = 0.2
d_alpha_dr = self.sersic.d_alpha_dr(x, 0, n_sersic, r_eff, k_eff)
alpha_dr = self.sersic.alpha_abs(x + dr, 0, n_sersic, r_eff, k_eff)
alpha = self.sersic.alpha_abs(x, 0, n_sersic, r_eff, k_eff)
d_alpha_dr_num = (alpha_dr - alpha)/dr
npt.assert_almost_equal(d_alpha_dr, d_alpha_dr_num, decimal=3)
def test_mag_sym(self):
"""
:return:
"""
r = 2.
angle1 = 0.
angle2 = 1.5
x1 = r * np.cos(angle1)
y1 = r * np.sin(angle1)
x2 = r * np.cos(angle2)
y2 = r * np.sin(angle2)
n_sersic = 4.5
r_eff = 2.5
k_eff = 0.8
f_xx1, f_yy1, f_xy1 = self.sersic.hessian(x1, y1, n_sersic, r_eff, k_eff)
f_xx2, f_yy2, f_xy2 = self.sersic.hessian(x2, y2, n_sersic, r_eff, k_eff)
kappa_1 = (f_xx1 + f_yy1) / 2
kappa_2 = (f_xx2 + f_yy2) / 2
npt.assert_almost_equal(kappa_1, kappa_2, decimal=10)
A_1 = (1 - f_xx1) * (1 - f_yy1) - f_xy1**2
A_2 = (1 - f_xx2) * (1 - f_yy2) - f_xy2 ** 2
npt.assert_almost_equal(A_1, A_2, decimal=10)
    def test_convergence(self):
        """
        tests the convergence and compares it with the original Sersic profile
:return:
"""
x = np.array([0, 0, 0, 0, 0])
y = np.array([0.5, 1, 1.5, 2, 2.5])
n_sersic = 4.5
r_eff = 2.5
k_eff = 0.2
f_xx, f_yy, f_xy = self.sersic.hessian(x, y, n_sersic, r_eff, k_eff)
kappa = (f_xx + f_yy) / 2.
assert kappa[0] > 0
flux = self.sersic_light.function(x, y, I0_sersic=1., R_sersic=r_eff, n_sersic=n_sersic)
flux /= flux[0]
kappa /= kappa[0]
npt.assert_almost_equal(flux[1], kappa[1], decimal=5)
if __name__ == '__main__':
pytest.main() | 36.913295 | 101 | 0.588945 |
b836958132ca61b68ee2f067a8779fcfed67ab77 | 18,180 | py | Python | tests/test_compute_service.py | njn007/cloudbridge | 422a449a52ca5c92cfe4210fe441eabea58dc506 | [
"MIT"
] | null | null | null | tests/test_compute_service.py | njn007/cloudbridge | 422a449a52ca5c92cfe4210fe441eabea58dc506 | [
"MIT"
] | null | null | null | tests/test_compute_service.py | njn007/cloudbridge | 422a449a52ca5c92cfe4210fe441eabea58dc506 | [
"MIT"
] | null | null | null | import ipaddress
import six
from cloudbridge.base import helpers as cb_helpers
from cloudbridge.base.resources import BaseNetwork
from cloudbridge.factory import ProviderList
from cloudbridge.interfaces import InstanceState
from cloudbridge.interfaces import InvalidConfigurationException
from cloudbridge.interfaces.exceptions import WaitStateException
from cloudbridge.interfaces.resources import Instance
from cloudbridge.interfaces.resources import SnapshotState
from cloudbridge.interfaces.resources import VMType
from tests import helpers
from tests.helpers import ProviderTestBase
from tests.helpers import standard_interface_tests as sit
class CloudComputeServiceTestCase(ProviderTestBase):
_multiprocess_can_split_ = True
@helpers.skipIfNoService(['compute.instances'])
def test_storage_services_event_pattern(self):
# pylint:disable=protected-access
self.assertEqual(
self.provider.compute.instances._service_event_pattern,
"provider.compute.instances",
"Event pattern for {} service should be '{}', "
"but found '{}'.".format("instances",
"provider.compute.instances",
self.provider.compute.instances.
_service_event_pattern))
@helpers.skipIfNoService(['compute.instances', 'networking.networks'])
def test_crud_instance(self):
label = "cb-instcrud-{0}".format(helpers.get_uuid())
# Declare these variables and late binding will allow
# the cleanup method access to the most current values
subnet = None
def create_inst(label):
            # Also test whether sending in an empty dict for user_data
# results in an automatic conversion to string.
return helpers.get_test_instance(self.provider, label,
subnet=subnet, user_data={})
def cleanup_inst(inst):
if inst:
inst.delete()
inst.wait_for([InstanceState.DELETED, InstanceState.UNKNOWN])
inst.refresh()
self.assertTrue(
inst.state == InstanceState.UNKNOWN,
"Instance.state must be unknown when refreshing after a "
"delete but got %s"
% inst.state)
def check_deleted(inst):
deleted_inst = self.provider.compute.instances.get(
inst.id)
self.assertTrue(
deleted_inst is None or deleted_inst.state in (
InstanceState.DELETED,
InstanceState.UNKNOWN),
"Instance %s should have been deleted but still exists." %
label)
subnet = helpers.get_or_create_default_subnet(self.provider)
sit.check_crud(self, self.provider.compute.instances, Instance,
"cb-instcrud", create_inst, cleanup_inst,
custom_check_delete=check_deleted)
def _is_valid_ip(self, address):
try:
ipaddress.ip_address(address)
except ValueError:
return False
return True
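    # Illustrative behaviour (not part of the test suite):
    #   self._is_valid_ip(u'10.0.0.1')  -> True
    #   self._is_valid_ip(u'not-an-ip') -> False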
@helpers.skipIfNoService(['compute.instances', 'networking.networks',
'security.vm_firewalls',
'security.key_pairs'])
def test_instance_properties(self):
label = "cb-inst-props-{0}".format(helpers.get_uuid())
# Declare these variables and late binding will allow
# the cleanup method access to the most current values
test_instance = None
fw = None
kp = None
with cb_helpers.cleanup_action(lambda: helpers.cleanup_test_resources(
test_instance, fw, kp)):
subnet = helpers.get_or_create_default_subnet(self.provider)
net = subnet.network
kp = self.provider.security.key_pairs.create(name=label)
fw = self.provider.security.vm_firewalls.create(
label=label, description=label, network=net.id)
test_instance = helpers.get_test_instance(self.provider,
label, key_pair=kp,
vm_firewalls=[fw],
subnet=subnet)
self.assertEqual(
test_instance.label, label,
"Instance label {0} is not equal to the expected label"
" {1}".format(test_instance.label, label))
image_id = helpers.get_provider_test_data(self.provider, "image")
self.assertEqual(test_instance.image_id, image_id,
"Image id {0} is not equal to the expected id"
" {1}".format(test_instance.image_id, image_id))
self.assertIsInstance(test_instance.zone_id,
six.string_types)
self.assertEqual(
test_instance.image_id,
helpers.get_provider_test_data(self.provider, "image"))
self.assertIsInstance(test_instance.public_ips, list)
if test_instance.public_ips:
self.assertTrue(
test_instance.public_ips[0], "public ip should contain a"
" valid value if a list of public_ips exist")
self.assertIsInstance(test_instance.private_ips, list)
self.assertTrue(test_instance.private_ips[0], "private ip should"
" contain a valid value")
self.assertEqual(
test_instance.key_pair_id,
kp.id)
self.assertIsInstance(test_instance.vm_firewalls, list)
self.assertEqual(
test_instance.vm_firewalls[0],
fw)
self.assertIsInstance(test_instance.vm_firewall_ids, list)
self.assertEqual(
test_instance.vm_firewall_ids[0],
fw.id)
# Must have either a public or a private ip
ip_private = test_instance.private_ips[0] \
if test_instance.private_ips else None
ip_address = test_instance.public_ips[0] \
if test_instance.public_ips and test_instance.public_ips[0] \
else ip_private
# Convert to unicode for py27 compatibility with ipaddress()
ip_address = u"{}".format(ip_address)
self.assertIsNotNone(
ip_address,
"Instance must have either a public IP or a private IP")
self.assertTrue(
self._is_valid_ip(ip_address),
"Instance must have a valid IP address. Got: %s" % ip_address)
self.assertIsInstance(test_instance.vm_type_id,
six.string_types)
vm_type = self.provider.compute.vm_types.get(
test_instance.vm_type_id)
self.assertEqual(
vm_type, test_instance.vm_type,
"VM type {0} does not match expected type {1}".format(
vm_type.name, test_instance.vm_type))
self.assertIsInstance(vm_type, VMType)
expected_type = helpers.get_provider_test_data(self.provider,
'vm_type')
self.assertEqual(
vm_type.name, expected_type,
"VM type {0} does not match expected type {1}".format(
vm_type.name, expected_type))
find_zone = [zone for zone in
self.provider.compute.regions.current.zones
if zone.id == test_instance.zone_id]
self.assertEqual(len(find_zone), 1,
"Instance's placement zone could not be "
" found in zones list")
@helpers.skipIfNoService(['compute.instances', 'compute.images',
'compute.vm_types'])
def test_block_device_mapping_launch_config(self):
lc = self.provider.compute.instances.create_launch_config()
# specifying an invalid size should raise
# an exception
with self.assertRaises(InvalidConfigurationException):
lc.add_volume_device(size=-1)
# Attempting to add a blank volume without specifying a size
# should raise an exception
with self.assertRaises(InvalidConfigurationException):
lc.add_volume_device(source=None)
# block_devices should be empty so far
self.assertListEqual(
lc.block_devices, [], "No block devices should have been"
" added to mappings list since the configuration was"
" invalid")
# Add a new volume
lc.add_volume_device(size=1, delete_on_terminate=True)
# Override root volume size
image_id = helpers.get_provider_test_data(self.provider, "image")
img = self.provider.compute.images.get(image_id)
# The size should be greater then the ami size
# and therefore, img.min_disk is used.
lc.add_volume_device(
is_root=True,
source=img,
size=img.min_disk if img and img.min_disk else 30,
delete_on_terminate=True)
# Attempting to add more than one root volume should raise an
# exception.
with self.assertRaises(InvalidConfigurationException):
lc.add_volume_device(size=1, is_root=True)
# Attempting to add an incorrect source should raise an exception
with self.assertRaises(InvalidConfigurationException):
lc.add_volume_device(
source="invalid_source",
delete_on_terminate=True)
# Add all available ephemeral devices
vm_type_name = helpers.get_provider_test_data(
self.provider,
"vm_type")
vm_type = self.provider.compute.vm_types.find(
name=vm_type_name)[0]
for _ in range(vm_type.num_ephemeral_disks):
lc.add_ephemeral_device()
# block_devices should be populated
self.assertTrue(
len(lc.block_devices) == 2 + vm_type.num_ephemeral_disks,
"Expected %d total block devices bit found %d" %
(2 + vm_type.num_ephemeral_disks, len(lc.block_devices)))
@helpers.skipIfNoService(['compute.instances', 'compute.images',
'compute.vm_types', 'storage.volumes'])
def test_block_device_mapping_attachments(self):
label = "cb-blkattch-{0}".format(helpers.get_uuid())
if self.provider.PROVIDER_ID == ProviderList.OPENSTACK:
raise self.skipTest("Not running BDM tests because OpenStack is"
" not stable enough yet")
test_vol = self.provider.storage.volumes.create(
label, 1)
with cb_helpers.cleanup_action(lambda: test_vol.delete()):
test_vol.wait_till_ready()
test_snap = test_vol.create_snapshot(label=label,
description=label)
def cleanup_snap(snap):
if snap:
snap.delete()
snap.wait_for([SnapshotState.UNKNOWN],
terminal_states=[SnapshotState.ERROR])
with cb_helpers.cleanup_action(lambda: cleanup_snap(test_snap)):
test_snap.wait_till_ready()
lc = self.provider.compute.instances.create_launch_config()
# Add a new blank volume
lc.add_volume_device(size=1, delete_on_terminate=True)
# Attach an existing volume
lc.add_volume_device(size=1, source=test_vol,
delete_on_terminate=True)
# Add a new volume based on a snapshot
lc.add_volume_device(size=1, source=test_snap,
delete_on_terminate=True)
# Override root volume size
image_id = helpers.get_provider_test_data(
self.provider,
"image")
img = self.provider.compute.images.get(image_id)
# The size should be greater then the ami size
# and therefore, img.min_disk is used.
lc.add_volume_device(
is_root=True,
source=img,
size=img.min_disk if img and img.min_disk else 30,
delete_on_terminate=True)
# Add all available ephemeral devices
vm_type_name = helpers.get_provider_test_data(
self.provider,
"vm_type")
vm_type = self.provider.compute.vm_types.find(
name=vm_type_name)[0]
            # Some providers, e.g. GCP, have a limit on the total number of
            # attached disks, regardless of how many of them are ephemeral or
            # persistent. So, keeping in mind that we have already attached 4
            # disks, we add ephemeral disks accordingly so as not to exceed
            # the limit.
for _ in range(vm_type.num_ephemeral_disks - 4):
lc.add_ephemeral_device()
subnet = helpers.get_or_create_default_subnet(
self.provider)
inst = None
with cb_helpers.cleanup_action(
lambda: helpers.delete_instance(inst)):
inst = helpers.create_test_instance(
self.provider,
label,
subnet=subnet,
launch_config=lc)
try:
inst.wait_till_ready()
except WaitStateException as e:
self.fail("The block device mapped launch did not "
" complete successfully: %s" % e)
# TODO: Check instance attachments and make sure they
# correspond to requested mappings
@helpers.skipIfNoService(['compute.instances', 'networking.networks',
'security.vm_firewalls'])
def test_instance_methods(self):
label = "cb-instmethods-{0}".format(helpers.get_uuid())
# Declare these variables and late binding will allow
# the cleanup method access to the most current values
net = None
test_inst = None
fw = None
with cb_helpers.cleanup_action(lambda: helpers.cleanup_test_resources(
instance=test_inst, vm_firewall=fw, network=net)):
net = self.provider.networking.networks.create(
label=label, cidr_block=BaseNetwork.CB_DEFAULT_IPV4RANGE)
cidr = '10.0.1.0/24'
subnet = net.subnets.create(label=label, cidr_block=cidr)
test_inst = helpers.get_test_instance(self.provider, label,
subnet=subnet)
fw = self.provider.security.vm_firewalls.create(
label=label, description=label, network=net.id)
# Check adding a VM firewall to a running instance
test_inst.add_vm_firewall(fw)
test_inst.refresh()
self.assertTrue(
fw in test_inst.vm_firewalls, "Expected VM firewall '%s'"
" to be among instance vm_firewalls: [%s]" %
(fw, test_inst.vm_firewalls))
# Check removing a VM firewall from a running instance
test_inst.remove_vm_firewall(fw)
test_inst.refresh()
self.assertTrue(
fw not in test_inst.vm_firewalls, "Expected VM firewall"
" '%s' to be removed from instance vm_firewalls: [%s]" %
(fw, test_inst.vm_firewalls))
# check floating ips
router = self.provider.networking.routers.create(label, net)
gateway = net.gateways.get_or_create()
def cleanup_router(router, gateway):
with cb_helpers.cleanup_action(lambda: router.delete()):
with cb_helpers.cleanup_action(lambda: gateway.delete()):
router.detach_subnet(subnet)
router.detach_gateway(gateway)
with cb_helpers.cleanup_action(lambda: cleanup_router(router,
gateway)):
router.attach_subnet(subnet)
router.attach_gateway(gateway)
fip = None
with cb_helpers.cleanup_action(
lambda: helpers.cleanup_fip(fip)):
# check whether adding an elastic ip works
fip = gateway.floating_ips.create()
self.assertFalse(
fip.in_use,
"Newly created floating IP %s should not be in use." %
fip.public_ip)
with cb_helpers.cleanup_action(
lambda: test_inst.remove_floating_ip(fip)):
test_inst.add_floating_ip(fip)
test_inst.refresh()
# On Devstack, FloatingIP is listed under private_ips.
self.assertIn(fip.public_ip, test_inst.public_ips +
test_inst.private_ips)
fip.refresh()
self.assertTrue(
fip.in_use,
"Attached floating IP %s address should be in use."
% fip.public_ip)
test_inst.refresh()
test_inst.reboot()
test_inst.wait_till_ready()
self.assertNotIn(
fip.public_ip,
test_inst.public_ips + test_inst.private_ips)
| 45.678392 | 79 | 0.565677 |
54a0e0d4a11c9bb77125b339c3c7d85474c2f401 | 348 | py | Python | Python/kraken/core/objects/constraints/position_constraint.py | goshow-jp/Kraken | 7088b474b6cc2840cea7ab642c5938e4a3290b6c | [
"BSD-3-Clause"
] | null | null | null | Python/kraken/core/objects/constraints/position_constraint.py | goshow-jp/Kraken | 7088b474b6cc2840cea7ab642c5938e4a3290b6c | [
"BSD-3-Clause"
] | null | null | null | Python/kraken/core/objects/constraints/position_constraint.py | goshow-jp/Kraken | 7088b474b6cc2840cea7ab642c5938e4a3290b6c | [
"BSD-3-Clause"
] | 1 | 2021-12-08T08:31:48.000Z | 2021-12-08T08:31:48.000Z | """Kraken - objects.Constraints.PositionConstraint module.
Classes:
PositionConstraint - Position Constraint.
"""
from constraint import Constraint
from kraken.core.maths.vec3 import Vec3
class PositionConstraint(Constraint):
"""Position Constraint."""
def __init__(self, name):
super(PositionConstraint, self).__init__(name)
| 20.470588 | 58 | 0.755747 |
495fe3f6d5abcc19b1011f99edabc92995a7d2ba | 7,655 | py | Python | artifacts/old_dataset_versions/minimal_commits_v02/amazon-braket-sdk-python/amazon-braket-sdk-python#44/before/moments.py | MattePalte/Bugs-Quantum-Computing-Platforms | 0c1c805fd5dfce465a8955ee3faf81037023a23e | [
"MIT"
] | 3 | 2021-11-08T11:46:42.000Z | 2021-12-27T10:13:38.000Z | artifacts/old_dataset_versions/minimal_commits/amazon-braket-sdk-python/amazon-braket-sdk-python#44/before/moments.py | MattePalte/Bugs-Quantum-Computing-Platforms | 0c1c805fd5dfce465a8955ee3faf81037023a23e | [
"MIT"
] | 2 | 2021-11-09T14:57:09.000Z | 2022-01-12T12:35:58.000Z | artifacts/old_dataset_versions/original_commits_v02/amazon-braket-sdk-python/amazon-braket-sdk-python#44/before/moments.py | MattePalte/Bugs-Quantum-Computing-Platforms | 0c1c805fd5dfce465a8955ee3faf81037023a23e | [
"MIT"
] | null | null | null | # Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from typing import (
Dict,
ItemsView,
Iterable,
KeysView,
List,
Mapping,
NamedTuple,
OrderedDict,
ValuesView,
)
from braket.circuits.instruction import Instruction
from braket.circuits.qubit import Qubit
from braket.circuits.qubit_set import QubitSet
class MomentsKey(NamedTuple):
"""Key of the Moments mapping."""
time: int
qubits: QubitSet
class Moments(Mapping[MomentsKey, Instruction]):
"""
    An ordered mapping of `MomentsKey` to `Instruction`. The core data structure that
    contains instructions, the order in which they were inserted, and the time slices
    in which they occur. `Moments` implements `Mapping` and functions the same as a
    read-only dictionary. It is mutable only through the `add()` method.
    This data structure is useful for determining dependencies between instructions,
    for example when printing or optimizing circuit structure before sending it to a
    quantum device. The original insertion order is preserved and can be retrieved via
    the `values()` method.
Args:
instructions (Iterable[Instruction], optional): Instructions to initialize self with.
Default = [].
Examples:
>>> moments = Moments()
>>> moments.add([Instruction(Gate.H(), 0), Instruction(Gate.CNot(), [0, 1])])
>>> moments.add([Instruction(Gate.H(), 0), Instruction(Gate.H(), 1)])
>>> for i, item in enumerate(moments.items()):
... print(f"Item {i}")
... print(f"\\tKey: {item[0]}")
... print(f"\\tValue: {item[1]}")
...
Item 0
Key: MomentsKey(time=0, qubits=QubitSet([Qubit(0)]))
Value: Instruction('operator': H, 'target': QubitSet([Qubit(0)]))
Item 1
Key: MomentsKey(time=1, qubits=QubitSet([Qubit(0), Qubit(1)]))
Value: Instruction('operator': CNOT, 'target': QubitSet([Qubit(0), Qubit(1)]))
Item 2
Key: MomentsKey(time=2, qubits=QubitSet([Qubit(0)]))
Value: Instruction('operator': H, 'target': QubitSet([Qubit(0)]))
Item 3
Key: MomentsKey(time=2, qubits=QubitSet([Qubit(1)]))
Value: Instruction('operator': H, 'target': QubitSet([Qubit(1)]))
"""
def __init__(self, instructions: Iterable[Instruction] = []):
self._moments: OrderedDict[MomentsKey, Instruction] = OrderedDict()
self._max_times: Dict[Qubit, int] = {}
self._qubits = QubitSet()
self._depth = 0
self.add(instructions)
@property
def depth(self) -> int:
"""int: Get the depth (number of slices) of self."""
return self._depth
@property
def qubit_count(self) -> int:
"""int: Get the number of qubits used across all of the instructions."""
return len(self._qubits)
@property
def qubits(self) -> QubitSet:
"""
QubitSet: Get the qubits used across all of the instructions. The order of qubits is based
on the order in which the instructions were added.
Note:
            Don't mutate this object; any changes may impact the behavior of this
            class and/or consumers. If you need to mutate it, copy it first via
            `QubitSet(moments.qubits)`.
"""
return self._qubits
def time_slices(self) -> Dict[int, List[Instruction]]:
"""
Get instructions keyed by time.
Returns:
Dict[int, List[Instruction]]: Key is the time and value is a list of instructions that
occur at that moment in time. The order of instructions is in no particular order.
Note:
This is a computed result over self and can be freely mutated. This is re-computed with
every call, with a computational runtime O(N) where N is the number
of instructions in self.
"""
time_slices = {}
for key, instruction in self._moments.items():
instructions = time_slices.get(key.time, [])
instructions.append(instruction)
time_slices[key.time] = instructions
return time_slices
def add(self, instructions: Iterable[Instruction]) -> None:
"""
Add instructions to self.
Args:
instructions (Iterable[Instruction]): Instructions to add to self. The instruction
are added to the max time slice in which the instruction fits.
"""
for instruction in instructions:
self._add(instruction)
def _add(self, instruction: Instruction) -> None:
qubit_range = range(min(instruction.target), max(instruction.target) + 1)
time = max([self._max_time_for_qubit(qubit) for qubit in qubit_range]) + 1
# Mark all qubits in the range to avoid another gate being placed in the overlap.
# For example CNOT(0, 5) would draw a line from 0 to 5 and therefore should prevent
# another instruction using those qubits in that time moment.
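        # Concretely (illustrative): once CNOT(0, 5) is placed at time t, a
        # later gate on qubit 2 is scheduled at t + 1 even though no earlier
        # instruction acted on qubit 2 directly.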
for qubit in qubit_range:
self._max_times[qubit] = max(time, self._max_time_for_qubit(qubit))
self._moments[MomentsKey(time, instruction.target)] = instruction
self._qubits.update(instruction.target)
self._depth = max(self._depth, time + 1)
def _max_time_for_qubit(self, qubit: Qubit) -> int:
return self._max_times.get(qubit, -1)
#
# Implement abstract methods, default to calling selfs underlying dictionary
#
def keys(self) -> KeysView[MomentsKey]:
"""Return a view of self's keys."""
return self._moments.keys()
def items(self) -> ItemsView[MomentsKey, Instruction]:
"""Return a view of self's (key, instruction)."""
return self._moments.items()
def values(self) -> ValuesView[Instruction]:
"""Return a view of self's instructions."""
return self._moments.values()
def get(self, key: MomentsKey, default=None) -> Instruction:
"""
Get the instruction in self by key.
Args:
key (MomentsKey): Key of the instruction to fetch.
default (Any, optional): Value to return if `key` is not in moment. Default = None.
Returns:
Instruction: moments[key] if key in moments, else `default` is returned.
"""
return self._moments.get(key, default)
def __getitem__(self, key):
return self._moments.__getitem__(key)
def __iter__(self):
return self._moments.__iter__()
def __len__(self):
return self._moments.__len__()
def __contains__(self, item):
return self._moments.__contains__(item)
def __eq__(self, other):
if isinstance(other, Moments):
return (self._moments) == (other._moments)
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
return not result
return NotImplemented
def __repr__(self):
return self._moments.__repr__()
def __str__(self):
return self._moments.__str__()
| 35.771028 | 99 | 0.634095 |
2f50310294dc9be22116849ffbe7121f81e7d8a2 | 2,318 | py | Python | bingads/v12/internal/bulk/row_values.py | channable/BingAds-Python-SDK | 205ebf9bdd9701d5d05c5f9ac59702083754f553 | [
"MIT"
] | null | null | null | bingads/v12/internal/bulk/row_values.py | channable/BingAds-Python-SDK | 205ebf9bdd9701d5d05c5f9ac59702083754f553 | [
"MIT"
] | null | null | null | bingads/v12/internal/bulk/row_values.py | channable/BingAds-Python-SDK | 205ebf9bdd9701d5d05c5f9ac59702083754f553 | [
"MIT"
] | 1 | 2019-03-15T10:43:21.000Z | 2019-03-15T10:43:21.000Z | from .csv_headers import _CsvHeaders
from .mappings import _SimpleBulkMapping
from bingads.v12.bulk import EntityReadException
import six
class _RowValues:
def __init__(self, mappings=None, columns=None):
self._mappings = mappings
self._columns = columns
if self.mappings is None:
self._mappings = _CsvHeaders.get_mappings()
if self.columns is None:
self._columns = [None] * len(self._mappings)
def __getitem__(self, key):
return self.columns[self._mappings[key]]
def __setitem__(self, key, value):
self.columns[self._mappings[key]] = value
def __contains__(self, item):
return item in self.mappings
def __len__(self):
return len(self.mappings)
def __str__(self):
return u'{' + u', '.join([u'{0}:{1}'.format(k, self.columns[v]) for (k, v) in self.mappings.items()]) + u'}'
def convert_to_entity(self, entity, bulk_mappings):
for mapping in bulk_mappings:
try:
mapping.convert_to_entity(self, entity)
except Exception as ex:
raise self._create_entity_read_exception(entity, mapping, ex)
def _create_entity_read_exception(self, entity, mapping, ex):
entity_type = str(type(entity))
if isinstance(mapping, _SimpleBulkMapping):
message = "Couldn't parse column {0} of {1} entity: {2}".format(
mapping.header,
entity_type,
str(ex)
)
else:
message = "Couldn't parse {0} entity: {1}".format(entity_type, str(ex))
message += " See ColumnValues for detailed row information and InnerException for error details."
if six.PY2:
message = message.decode('ascii')
message += u' row values: {0}'.format(self)
return EntityReadException(message=message, row_values=str(self), inner_exception=ex)
def try_get_value(self, header):
if header not in self.mappings:
return False, None
return True, self[header]
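    # Illustrative usage (hypothetical header and value, not from the SDK):
    #   row = _RowValues()
    #   row['Type'] = 'Campaign'
    #   row.try_get_value('Type')  # -> (True, 'Campaign')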
def to_dict(self):
return dict([(k, self.columns[v]) for (k, v) in self.mappings.items()])
@property
def mappings(self):
return self._mappings
@property
def columns(self):
return self._columns
| 32.647887 | 116 | 0.622088 |
9ba2767efe636dd4c9e8fb48dfe16d3f8f84c22c | 864 | py | Python | discordSuperUtils/kick.py | Heapy1337/discord-super-utils | be9d65fbc957d017df534ac502457f387594a9c8 | [
"MIT"
] | 91 | 2021-07-14T13:01:31.000Z | 2022-03-25T10:28:49.000Z | discordSuperUtils/kick.py | KortaPo/discord-super-utils | b8c1cd1a986bc5c78eaf472bb5caf44dd7b605e4 | [
"MIT"
] | 14 | 2021-08-13T14:23:54.000Z | 2022-03-25T09:57:12.000Z | discordSuperUtils/kick.py | KortaPo/discord-super-utils | b8c1cd1a986bc5c78eaf472bb5caf44dd7b605e4 | [
"MIT"
] | 42 | 2021-08-02T00:27:24.000Z | 2022-03-31T15:47:37.000Z | from __future__ import annotations
from typing import TYPE_CHECKING
import discord
from .base import EventManager
from .punishments import Punisher
if TYPE_CHECKING:
from discord.ext import commands
from .punishments import Punishment
__all__ = ("KickManager",)
class KickManager(EventManager, Punisher):
"""
A KickManager that manages kicks for guilds.
"""
__slots__ = ("bot",)
def __init__(self, bot: commands.Bot):
super().__init__()
self.bot = bot
async def punish(
self, ctx: commands.Context, member: discord.Member, punishment: Punishment
) -> None:
try:
await member.kick(reason=punishment.punishment_reason)
        except discord.errors.Forbidden:
            raise
else:
await self.call_event("on_punishment", ctx, member, punishment)
| 22.736842 | 83 | 0.670139 |
1161be2d229bcb8e3575277c1fe91c1cd915ed15 | 1,837 | py | Python | waterbutler/providers/box/metadata.py | alexschiller/waterbutler | 24014d7705aca3e99a6565fc3b9b4075ec6ec563 | [
"Apache-2.0"
] | null | null | null | waterbutler/providers/box/metadata.py | alexschiller/waterbutler | 24014d7705aca3e99a6565fc3b9b4075ec6ec563 | [
"Apache-2.0"
] | null | null | null | waterbutler/providers/box/metadata.py | alexschiller/waterbutler | 24014d7705aca3e99a6565fc3b9b4075ec6ec563 | [
"Apache-2.0"
] | null | null | null | from waterbutler.core import metadata
class BaseBoxMetadata(metadata.BaseMetadata):
def __init__(self, raw, path_obj):
super().__init__(raw)
self._path_obj = path_obj
@property
def provider(self):
return 'box'
@property
def materialized_path(self):
return str(self._path_obj)
class BoxFolderMetadata(BaseBoxMetadata, metadata.BaseFolderMetadata):
@property
def name(self):
return self.raw['name']
@property
def path(self):
return '/{}/'.format(self.raw['id'])
class BoxFileMetadata(BaseBoxMetadata, metadata.BaseFileMetadata):
@property
def name(self):
return self.raw['name']
@property
def path(self):
return '/{0}'.format(self.raw['id'])
@property
def size(self):
return self.raw.get('size')
@property
def modified(self):
return self.raw.get('modified_at')
@property
def content_type(self):
return None
@property
def extra(self):
return {
'etag': self.raw.get('etag'),
}
@property
def etag(self):
return '{}::{}'.format(self.raw.get('etag', ''), self.raw['id'])
class BoxRevision(metadata.BaseFileRevisionMetadata):
@property
def version(self):
try:
return self.raw['id']
except KeyError:
return self.raw['path'].split('/')[1]
@property
def version_identifier(self):
return 'revision'
@property
def path(self):
try:
return '/{0}/{1}'.format(self.raw['id'], self.raw['name'])
except KeyError:
return self.raw.get('path')
@property
def modified(self):
try:
return self.raw['modified_at']
except KeyError:
return self.raw.get('modified')
| 20.640449 | 72 | 0.583016 |
ed5df34c50965c70ba92a129e7d017d52fa4ecfb | 1,555 | py | Python | problem.py | nhatminh-96/datacamp-project | a9d42ae0ac5a8ecdfe3d084cc8c5ce71c60db4df | [
"MIT"
] | null | null | null | problem.py | nhatminh-96/datacamp-project | a9d42ae0ac5a8ecdfe3d084cc8c5ce71c60db4df | [
"MIT"
] | null | null | null | problem.py | nhatminh-96/datacamp-project | a9d42ae0ac5a8ecdfe3d084cc8c5ce71c60db4df | [
"MIT"
] | null | null | null | import os
import string
from glob import glob
import pandas as pd
import numpy as np
from sklearn.metrics import r2_score
from sklearn.model_selection import ShuffleSplit
import rampwf as rw
from rampwf.score_types.base import BaseScoreType
problem_title = "Salary prediction of NBA Basketball players"
_target_names = [
'Salary'
]
Predictions = rw.prediction_types.make_regression(label_names=_target_names)
workflow = rw.workflows.Regressor()
class R2(BaseScoreType):
is_lower_the_better = False
minimum = 0.0
maximum = 1.0
def __init__(self, name="r2_score", precision=4):
self.name = name
self.precision = precision
def __call__(self, y_true, y_pred):
r2 = r2_score(y_true, y_pred)
return r2
score_types = [
R2(name="r2_score"),
]
def get_train_data(path="./"):
train = pd.read_csv(os.path.join(path, "data", "train", 'train.csv'))
    features = list(set(train.columns) - set(['SALARY', 'Season']))
X_train, y_train = train[features], train['SALARY']
X_train = X_train.reset_index()
return X_train.values, y_train.values.reshape(-1, 1)
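# Editorial note: deriving `features` from a set difference leaves the column
# order unspecified; it can vary across interpreter runs because string hashing
# is randomized. Sorting the result would make the feature order reproducible.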
def get_test_data(path="./"):
test = pd.read_csv(os.path.join(path, "data", "test", "test.csv"))
    features = list(set(test.columns) - set(['SALARY', 'Season']))
X_test, y_test = test[features], test['SALARY']
X_test = X_test.reset_index()
return X_test.values, y_test.values.reshape(-1, 1)
def get_cv(X, y):
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=1)
return cv.split(X, y)
| 25.080645 | 76 | 0.689389 |
5ce574d95bd86b8ed06ced67440bd9cee5ef8310 | 2,041 | py | Python | aliyun-python-sdk-waf-openapi/aliyunsdkwaf_openapi/request/v20180117/DescribeAsyncTaskStatusRequest.py | silent-beaters/aliyun-openapi-python-sdk | 7a025eabdad622af07affc3a7beeae1c5def469d | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-waf-openapi/aliyunsdkwaf_openapi/request/v20180117/DescribeAsyncTaskStatusRequest.py | silent-beaters/aliyun-openapi-python-sdk | 7a025eabdad622af07affc3a7beeae1c5def469d | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-waf-openapi/aliyunsdkwaf_openapi/request/v20180117/DescribeAsyncTaskStatusRequest.py | silent-beaters/aliyun-openapi-python-sdk | 7a025eabdad622af07affc3a7beeae1c5def469d | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkwaf_openapi.endpoint import endpoint_data
class DescribeAsyncTaskStatusRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'waf-openapi', '2018-01-17', 'DescribeAsyncTaskStatus','waf')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_WafRequestId(self):
return self.get_query_params().get('WafRequestId')
def set_WafRequestId(self,WafRequestId):
self.add_query_param('WafRequestId',WafRequestId)
def get_Region(self):
return self.get_query_params().get('Region')
def set_Region(self,Region):
self.add_query_param('Region',Region) | 35.807018 | 90 | 0.769721 |
081a49a65a65bfa876e41f0cb99c314e3e0c2642 | 4,239 | py | Python | abinitio/03_logistic_regression.py | Moreficent/xvi | fb9d4cf15638104480a4f1666d77644fe7ecc760 | [
"Apache-2.0"
] | null | null | null | abinitio/03_logistic_regression.py | Moreficent/xvi | fb9d4cf15638104480a4f1666d77644fe7ecc760 | [
"Apache-2.0"
] | null | null | null | abinitio/03_logistic_regression.py | Moreficent/xvi | fb9d4cf15638104480a4f1666d77644fe7ecc760 | [
"Apache-2.0"
] | null | null | null | """
Calibrates a logistic regression model to the data. The slope and intercept are
assumed to be normally distributed.
Eager execution is used for simplicity; the training step itself is compiled with @tf.function.
"""
from abinitio.utils import fix_seed
fix_seed(42)
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
np.set_printoptions(precision=2)
PRIOR_INTERCEPT_LOC = 0
PRIOR_INTERCEPT_SCALE = 1
PRIOR_SLOPE_LOC = 0
PRIOR_SLOPE_SCALE = 1
PRIOR_SAMPLES = 100
MODEL_SAMPLES = 8
POSTERIOR_SAMPLES = 500
@tf.function
def train_step(mu0, mu1, omega0, omega1, prior_intercept_dist, prior_slope_dist, xs, ys, optim):
with tf.GradientTape() as tape:
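        # Reparameterization trick: draw eta ~ N(0, 1), then shift and scale by
        # the variational parameters so gradients flow back to mu and omega.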
eta0 = tf.random.normal([1, MODEL_SAMPLES])
eta1 = tf.random.normal([1, MODEL_SAMPLES])
theta0 = mu0 + eta0 * tf.math.exp(omega0)
theta1 = mu1 + eta1 * tf.math.exp(omega1)
logit = theta0 + xs * theta1
dist = tfp.distributions.Bernoulli(logits=logit)
lp = dist.log_prob(ys)
lp = tf.math.reduce_mean(lp, axis=1)
lp = tf.math.reduce_sum(lp)
prior_theta0 = prior_intercept_dist.log_prob(theta0)
prior_theta1 = prior_slope_dist.log_prob(theta1)
constraint = tf.math.reduce_mean(prior_theta0 + prior_theta1) + omega0 + omega1
elbo = lp + constraint
loss = -elbo
model_parameters = [mu0, mu1, omega0, omega1]
gradients = tape.gradient(loss, model_parameters)
optim.apply_gradients(zip(gradients, model_parameters))
return loss
if __name__ == '__main__':
import matplotlib.pyplot as plt
# generating data
data_intercept = 0.5
data_slope = -0.5
data_samples = 50
xs = tf.random.uniform(shape=[data_samples, 1], minval=-2.0, maxval=2.0)
xs_prob = tf.math.sigmoid(data_intercept + data_slope * xs)
ys = tf.random.stateless_binomial(shape=[data_samples, 1], seed=[3141, 2718], counts=1, probs=xs_prob)
# constructing the model
prior_intercept_dist = tfp.distributions.Normal(PRIOR_INTERCEPT_LOC, PRIOR_INTERCEPT_SCALE)
prior_intercept_zeta = prior_intercept_dist.sample([PRIOR_SAMPLES])
init_mu0 = tf.math.reduce_mean(prior_intercept_zeta)
init_omega0 = tf.math.log(tf.math.reduce_std(prior_intercept_zeta))
mu0 = tf.Variable(initial_value=init_mu0, trainable=True, name='mu0')
omega0 = tf.Variable(initial_value=init_omega0, trainable=True, name='omega0')
prior_slope_dist = tfp.distributions.Normal(PRIOR_SLOPE_LOC, PRIOR_SLOPE_SCALE)
prior_slope_zeta = prior_slope_dist.sample([PRIOR_SAMPLES])
init_mu1 = tf.math.reduce_mean(prior_slope_zeta)
init_omega1 = tf.math.log(tf.math.reduce_std(prior_slope_zeta))
mu1 = tf.Variable(initial_value=init_mu1, trainable=True, name='mu1')
omega1 = tf.Variable(initial_value=init_omega1, trainable=True, name='omega1')
print('Initial Parameters')
print(f' > mu0: {init_mu0:1.3f}, omega0: {init_omega0:1.3f}')
print(f' > mu1: {init_mu1:1.3f}, omega1: {init_omega1:1.3f}')
# training the model
optim = tf.keras.optimizers.RMSprop(learning_rate=5e-2)
num_epochs = 2000
for idx in range(num_epochs):
loss = train_step(mu0, mu1, omega0, omega1, prior_intercept_dist, prior_slope_dist, xs, ys, optim)
if idx % 100 == 0:
loss = loss.numpy()
rep0 = f'mu0: {mu0.numpy():1.3f}, omega0: {omega0.numpy():1.3f}'
rep1 = f'mu1: {mu1.numpy():1.3f}, omega1: {omega1.numpy():1.3f}'
print(f'[{idx:04d}] loss: {loss:1.3f} {rep0} {rep1}')
# visualizing
posterior_intercept_samples = tf.random.normal(shape=[POSTERIOR_SAMPLES],
mean=mu0.numpy(),
stddev=tf.math.exp(omega0).numpy()).numpy()
posterior_slope_samples = tf.random.normal(shape=[POSTERIOR_SAMPLES],
mean=mu1.numpy(),
stddev=tf.math.exp(omega1).numpy()).numpy()
fig, axs = plt.subplots(1, 2)
ax = axs[0]
ax.hist(posterior_intercept_samples)
ax.set_xlabel('intercept')
ax = axs[1]
ax.hist(posterior_slope_samples)
ax.set_xlabel('slope')
plt.show()
print('ho gaya')
| 36.543103 | 106 | 0.664544 |
1d017fc0c5787ae11112d0f722270347cb5c8b7d | 278 | py | Python | DMCNN/constant.py | Hspix/Adv-ED | 60bf9546c91558ca956a3957003a14166590a8f0 | [
"MIT"
] | 69 | 2019-04-03T13:55:05.000Z | 2022-03-31T08:04:46.000Z | DMCNN/constant.py | Hspix/Adv-ED | 60bf9546c91558ca956a3957003a14166590a8f0 | [
"MIT"
] | 13 | 2019-05-10T13:38:34.000Z | 2021-10-21T03:00:35.000Z | DMCNN/constant.py | Hspix/Adv-ED | 60bf9546c91558ca956a3957003a14166590a8f0 | [
"MIT"
] | 21 | 2019-04-05T06:04:53.000Z | 2021-12-26T08:53:36.000Z | dataPath="./"
MaxPos=2*SenLen
SenLen=60
LocalLen=1
dimWE=300
dimPE=5
dimC=200
dimE=22
filter_size=3
keepProb=0.5
Epoch=7
BatchSize=170
vec_file="wiki.multi.en.vec"
AugTimes=1
NegCnt=20
Threshold=6
ItemTimes=10
alpha=4.0
sLr=0.005
dLr=0.02
EncodedDim=2*dimC+(2*LocalLen+1)*dimWE
| 12.636364 | 38 | 0.773381 |
5700439c7408fec1f3a4dd470bbb9f340e96eb75 | 9,444 | py | Python | features/steps/pipeline_steps.py | guludo/kedro | 274b5011204082697835f272a7f44ae1549e608d | [
"Apache-2.0"
] | null | null | null | features/steps/pipeline_steps.py | guludo/kedro | 274b5011204082697835f272a7f44ae1549e608d | [
"Apache-2.0"
] | null | null | null | features/steps/pipeline_steps.py | guludo/kedro | 274b5011204082697835f272a7f44ae1549e608d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""Behave step definitions for running pipelines end to end."""
import logging
import shutil
import sys
from io import StringIO
from pathlib import Path
from shutil import copyfile
import behave
import pandas as pd
import yaml
from behave import given, then, when
from pandas.util.testing import assert_frame_equal
import features.steps.util as util
from features.steps.sh_run import run
from kedro.io import DataCatalog, MemoryDataSet
LOG_EVIDENCE_PHRASE = "Running a risky operation"
INTENTIONAL_FAILURE = "Something intentionally went wrong."
INDENT4 = " " * 4
INDENTED_NEWLINE = "\n" + INDENT4
behave.register_type(CSV=util.parse_csv)
def identity(item):
"""Function intended for identity node."""
return item
def concatenate(item1, item2):
"""Function intended for concatinating two items."""
return item1 + item2
def sum_dfs(dataframe1, dataframe2):
"""pd.DataFrame Sum method that outputs a warning in the logs."""
logging.getLogger("kedro.runner").warning(LOG_EVIDENCE_PHRASE)
return dataframe1 + dataframe2.values
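# Illustrative (editorial note): sum_dfs(df_a, df_c) adds the two frames
# element-wise and logs LOG_EVIDENCE_PHRASE, presumably so later steps can
# assert on its presence in the captured logs.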
def failing_function(item):
"""Fail with an exception."""
raise RuntimeError(INTENTIONAL_FAILURE)
def _set_up_temp_logging(context):
context.log_data = StringIO()
handler = logging.StreamHandler(context.log_data)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
handler.setFormatter(formatter)
logger = logging.getLogger("kedro.runner")
logger.setLevel(logging.INFO)
logger.handlers = []
logger.addHandler(handler)
def _setup_template_files(context):
curr_dir = Path(__file__).parent
hooks_file = (
context.root_project_dir
/ "src"
/ context.project_name.replace("-", "_")
/ "hooks.py"
)
catalog_file = context.root_project_dir / "conf" / "base" / "catalog.yml"
if hooks_file.exists():
hooks_file.unlink()
copyfile(str(curr_dir / "hooks_template.py"), str(hooks_file))
if catalog_file.exists():
catalog_file.unlink()
copyfile(str(curr_dir / "e2e_test_catalog.yml"), str(catalog_file))
with catalog_file.open("r") as cat_file:
catalog = yaml.safe_load(cat_file)
context.output_file1 = context.root_project_dir / catalog["E"]["filepath"]
context.output_file2 = context.root_project_dir / catalog["F"]["filepath"]
df_a = pd.DataFrame({"col1": [1, 2], "col2": [3, 4], "col3": [5, 6]})
df_c = pd.DataFrame({"col1": [9, 8], "col2": [7, 6], "col3": [5, 4]})
df_a.to_csv(context.root_project_dir / catalog["A"]["filepath"], index=False)
df_c.to_csv(context.root_project_dir / catalog["C"]["filepath"], index=False)
_add_external_packages(context)
def _add_external_packages(context):
external_packages = getattr(context, "external_packages", [])
if not external_packages:
return
pipeline_file = (
context.root_project_dir
/ "src"
/ context.project_name.replace("-", "_")
/ "hooks.py"
)
with pipeline_file.open("at", encoding="utf-8") as _pf:
_imports = "\n".join("import {}".format(p) for p in external_packages)
_pf.write(f"\n{_imports}\n")
requirements_file = context.root_project_dir / "src" / "requirements.txt"
with requirements_file.open("at", encoding="utf-8") as _reqs:
_reqs.write("\n".join(external_packages) + "\n")
def _create_template_project(context):
# Sets the following fields on the context:
# - project_name (the simple project name == project-pipeline)
# - temp_dir (the directory containing the created project)
# - root_project_dir (the full path to the created project)
# - include_example (the project contains code example)
context.project_name = "project-pipeline"
context.config_file = context.temp_dir / "config"
root_project_dir = context.temp_dir / context.project_name
context.root_project_dir = root_project_dir
context.include_example = True
config = {
"project_name": context.project_name,
"repo_name": context.project_name,
"output_dir": str(context.temp_dir),
"python_package": context.project_name.replace("-", "_"),
"include_example": context.include_example,
}
with context.config_file.open("w") as config_file:
yaml.dump(config, config_file, default_flow_style=False)
res = run([context.kedro, "new", "-c", str(context.config_file)], env=context.env)
assert res.returncode == 0
_setup_template_files(context)
def resolve_free_inputs(context):
catalog = context.catalog if hasattr(context, "catalog") else None
feed_dict = context.feed_dict if hasattr(context, "feed_dict") else None
return catalog, feed_dict
@given(
"I have defined an io catalog containing "
'["{key1}", "{key2}", "{key3}", "{key4}"]'
)
def set_catalog(context, key1, key2, key3, key4):
ds1 = pd.DataFrame({"col1": [1, 2], "col2": [3, 4], "col3": [5, 6]})
ds2 = pd.DataFrame({"col1": [9, 8], "col2": [7, 6], "col3": [5, 4]})
context.catalog = DataCatalog(
{
key1: MemoryDataSet(ds1),
key2: MemoryDataSet(),
key3: MemoryDataSet(ds2),
key4: MemoryDataSet(),
}
)
@given('I have added external packages "{external_packages}" to project requirements')
def add_external_packages(context, external_packages):
context.external_packages = [p.strip() for p in external_packages.split(",")]
@given("I have included a pipeline definition in a project template")
def create_template_with_pipeline(context):
_create_template_project(context)
@given('I have defined a node "{node_name}" tagged with {tags:CSV}')
def node_tagged_with(context, node_name, tags):
"""
Check tagging in `hooks_template.py` is consistent with tagging
descriptions in background steps
"""
sys.path.append(
str(context.root_project_dir / "src" / context.project_name.replace("-", "_"))
)
# pylint: disable=import-outside-toplevel
import hooks
# pylint: disable=no-member
context.project_pipeline = hooks.project_hooks.register_pipelines()["__default__"]
node_objs = [n for n in context.project_pipeline.nodes if n.name == node_name]
assert node_objs
assert set(tags) == node_objs[0].tags
@given('I have set the project log level to "{log_level}"')
def change_log_level_proj(context, log_level):
logging_yml = context.root_project_dir / "conf" / "base" / "logging.yml"
with logging_yml.open() as file_handle:
logging_conf = yaml.safe_load(file_handle)
logging_conf["handlers"]["console"]["level"] = log_level
logging_conf["loggers"]["kedro.pipeline"]["level"] = log_level
logging_conf["root"]["level"] = log_level
with logging_yml.open("w") as file_handle:
yaml.dump(logging_conf, file_handle)
@when("the template pipeline is run")
def run_template_pipeline(context):
run_cmd = [context.kedro, "run"]
context.run_result = run(
run_cmd, env=context.env, cwd=str(context.root_project_dir)
)
if context.run_result.returncode == 0:
context.df_e = pd.read_csv(context.output_file1)
context.df_f = pd.read_csv(context.output_file2)
shutil.rmtree(str(context.root_project_dir))
@then("it should successfully produce the results")
def check_template_run_success(context):
if context.run_result.returncode:
print(context.run_result.stdout)
print(context.run_result.stderr)
assert False
assert_frame_equal(context.df_e, context.df_f)
assert context.df_e.values.tolist() == [[10, 10, 10], [10, 10, 10]]
assert context.df_f.values.tolist() == [[10, 10, 10], [10, 10, 10]]
@then('it should fail with an error message including "{msg}"')
def check_template_run_fail(context, msg):
assert context.run_result.returncode > 0
try:
assert msg in context.run_result.stderr
except AssertionError:
print(context.run_result.stderr)
raise
| 34.593407 | 86 | 0.700868 |
10d17970df085b6489e73c01f804dd7ebad03df2 | 1,657 | py | Python | language/generated-samples/v1/language_sentiment_text.py | yshalabi/python-docs-samples | 591787c01d94102ba9205f998d95a05b39ccad2f | [
"Apache-2.0"
] | 2 | 2020-09-19T04:22:52.000Z | 2020-09-23T14:04:17.000Z | language/generated-samples/v1/language_sentiment_text.py | yshalabi/python-docs-samples | 591787c01d94102ba9205f998d95a05b39ccad2f | [
"Apache-2.0"
] | 1 | 2020-07-24T19:18:29.000Z | 2020-07-24T19:45:23.000Z | language/generated-samples/v1/language_sentiment_text.py | yshalabi/python-docs-samples | 591787c01d94102ba9205f998d95a05b39ccad2f | [
"Apache-2.0"
] | 2 | 2020-09-13T03:47:22.000Z | 2020-09-23T14:04:19.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "analyze_sentiment")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-language
import sys
# [START language_sentiment_text]
from google.cloud import language_v1
from google.cloud.language_v1 import enums
import six
def sample_analyze_sentiment(content):
client = language_v1.LanguageServiceClient()
# content = 'Your text to analyze, e.g. Hello, world!'
if isinstance(content, six.binary_type):
content = content.decode('utf-8')
type_ = enums.Document.Type.PLAIN_TEXT
document = {'type': type_, 'content': content}
response = client.analyze_sentiment(document)
sentiment = response.document_sentiment
print('Score: {}'.format(sentiment.score))
print('Magnitude: {}'.format(sentiment.magnitude))
# [END language_sentiment_text]
def main():
# FIXME: Convert argv from strings to the correct types.
sample_analyze_sentiment(*sys.argv[1:])
if __name__ == '__main__':
main()
| 28.084746 | 76 | 0.731442 |
df93f8f0960187b4e241d5b7a4cd74625f8164da | 281 | py | Python | examples/pyplot/donut.py | danielhrisca/vedo | 487568b7956a67b87752e3d518ba3f7e87b327a6 | [
"CC0-1.0"
] | 1 | 2021-04-25T06:28:01.000Z | 2021-04-25T06:28:01.000Z | examples/pyplot/donut.py | danielhrisca/vedo | 487568b7956a67b87752e3d518ba3f7e87b327a6 | [
"CC0-1.0"
] | null | null | null | examples/pyplot/donut.py | danielhrisca/vedo | 487568b7956a67b87752e3d518ba3f7e87b327a6 | [
"CC0-1.0"
] | null | null | null | from vedo.pyplot import donut
title = "A donut plot"
fractions = [0.1, 0.2, 0.3, 0.1, 0.3]
colors = [ 1, 2, 3, 4, 'white']
labels = ["stuff_1 ", "stuff_2 ", "comp^A ", "comp^B ", ""]
dn = donut(fractions, c=colors, labels=labels, title=title)
dn.show(axes=None)
| 25.545455 | 62 | 0.580071 |
2fc66d1554962edb49b16b8283859e5335830dfc | 483 | py | Python | curso-em-video/aula_13/estrutura-de-repeticao-for.py | talysonxx/python | 520b108731e28c7dc1fca3523b925be506fd8340 | [
"MIT"
] | null | null | null | curso-em-video/aula_13/estrutura-de-repeticao-for.py | talysonxx/python | 520b108731e28c7dc1fca3523b925be506fd8340 | [
"MIT"
] | null | null | null | curso-em-video/aula_13/estrutura-de-repeticao-for.py | talysonxx/python | 520b108731e28c7dc1fca3523b925be506fd8340 | [
"MIT"
] | null | null | null | '''for c in range(6, 0, -2): iterates 6, 4, 2; the stop value (0) is exclusive, so it is never printed
    print(c)'''
'''n = int(input('Enter a number: '))
passo = int(input('Enter the step: '))
if passo > 0:
    for c in range(0, n + 1, passo):
        print(c, end=' > ')
else:
    for c in range(n, -1, passo):
        print(c, end=' > ')'''
nome = str(input('Enter your full name: ')).strip().title().split()
quantidade = len(nome)
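# e.g. typing "ana maria silva" yields nome == ['Ana', 'Maria', 'Silva']
# and quantidade == 3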
for c in range(0, quantidade):
print(nome[c], end=' ')
print('\nEND')
| 26.833333 | 73 | 0.559006 |
5e5a8272c55e5b91ed103aa9b33c929e535e2b30 | 6,636 | py | Python | einsteinish/settings.py | Einsteinish/Einstein | f65d705bbc118f4c4f7f85b4e3b121cc62ee6350 | [
"MIT"
] | 28 | 2016-01-27T04:27:40.000Z | 2021-09-13T18:42:15.000Z | einsteinish/settings.py | Einsteinish/Einstein | f65d705bbc118f4c4f7f85b4e3b121cc62ee6350 | [
"MIT"
] | 4 | 2021-06-08T18:55:52.000Z | 2022-01-13T00:36:36.000Z | einsteinish/settings.py | Einsteinish/Einstein | f65d705bbc118f4c4f7f85b4e3b121cc62ee6350 | [
"MIT"
] | 21 | 2016-01-17T18:29:59.000Z | 2021-11-18T10:51:25.000Z | import os
# secrets are defined in local_settings.py
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
DEBUG = True
ADMINS = (
#('K Hong', 'contact.einsteinish@gmail.com'),
('K Hong', 'einsteinish@aol.com'),
)
MANAGERS = ADMINS
DJANGO_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.sitemaps',
'django.contrib.flatpages',
)
THIRD_PARTY_APPS = (
'markdown_deux',
'haystack',
'django_extensions',
'guardian',
'widget_tweaks',
'braces',
'djangoratings',
'disqus',
)
LOCAL_APPS = (
'registration',
'profiles',
'resources',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'assets')
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'guardian.backends.ObjectPermissionBackend',
)
ALLOWED_HOSTS = []
TIME_ZONE = 'America/Tijuana'
LANGUAGE_CODE = 'en-us'
USE_I18N = False
USE_L10N = True
USE_TZ = False
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
os.path.join(PROJECT_ROOT, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.static',
'django.template.context_processors.tz',
],
},
},
]
ROOT_URLCONF = 'einsteinish.urls'
WSGI_APPLICATION = 'einsteinish.wsgi.application'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
#HAYSTACK settings
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.BaseSignalProcessor'
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 12
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://127.0.0.1:9200/',
'INDEX_NAME': 'haystack',
},
}
# For light traffic
# HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
#DJANGO extended settings we are using
LOGIN_REDIRECT_URL = '/resource/'
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda u: "/profile/%s/" % u.username,
}
#SERVER_EMAIL = "kihyuck.hong@gmail.com"
SERVER_EMAIL = "k.hong@aol.com"
#Django-Registration Settings
ACCOUNT_ACTIVATION_DAYS = 7
#DEFAULT_FROM_EMAIL = "k.hong@aol.com"
DEFAULT_FROM_EMAIL = "kihyuck.hong@gmail.com"
#Django-Guardian Settings
ANONYMOUS_USER_ID = -1
GUARDIAN_RENDER_403 = True
#Django-Ratings Settings
RATINGS_VOTES_PER_IP = 2000 #TODO all votes are essentially from 127.0.0.1. This can lead to problems
#Disqus Settings
DISQUS_API_KEY = 'lTuOXBAfTK3symHWvi7cZHgcYipkL32BoSud7f0H4gl4lfVhVw0HCcbcmiu1rWJY'
DISQUS_WEBSITE_SHORTNAME = 'einsteinish'
'''
################################
# Moved to local_settings.py
# sqlite
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'einsteinish.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# mysql
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'einsteinishuserdb',
'USER': 'user', # Not used with sqlite3.
'PASSWORD': 'password', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Moved to local_settings.py
#SITE_ID = 1 # example.com
#SITE_ID = 2 # einsteinish.com
SITE_ID = 3 # localhost:8000
# Moved to local_settings.py
#EMAIL_HOST = 'smtp.gmail.com'
#EMAIL_HOST_USER = 'kihyuck.hong@gmail.com'
#EMAIL_HOST_PASSWORD = 'password'
EMAIL_HOST = 'smtp.aol.com'
EMAIL_HOST_USER = 'k.hong@aol.com'
EMAIL_HOST_PASSWORD = 'password'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Moved to local_settings.py
SECRET_KEY = 'einsteinishcom'
################################
'''
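# A minimal local_settings.py consistent with the commented block above might
# look like the following sketch (all values are placeholders, not the real
# deployment settings):
#
#     SECRET_KEY = 'replace-me'
#     SITE_ID = 3
#     EMAIL_HOST = 'smtp.example.com'
#     EMAIL_HOST_USER = 'user@example.com'
#     EMAIL_HOST_PASSWORD = 'password'
#     EMAIL_PORT = 587
#     EMAIL_USE_TLS = True
#     DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3',
#                              'NAME': 'einsteinish.db'}}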
# Import Local and Prod settings
try:
from local_settings import *
except ImportError:
pass
try:
from prod_settings import *
except ImportError:
pass
| 27.308642 | 108 | 0.633213 |
9de86cdb3884ad2d86fb4bfac38827c44785fa5c | 2,792 | py | Python | xain/benchmark/exec/__main__.py | danieljanes/ox-msc-diss-code-freeze | 20c6881cabdf1e3ed7a9ddb40bbdcc7a7fd22f78 | [
"Apache-2.0"
] | 1 | 2020-05-30T20:34:19.000Z | 2020-05-30T20:34:19.000Z | xain/benchmark/exec/__main__.py | danieljanes/ox-msc-diss-code-freeze | 20c6881cabdf1e3ed7a9ddb40bbdcc7a7fd22f78 | [
"Apache-2.0"
] | null | null | null | xain/benchmark/exec/__main__.py | danieljanes/ox-msc-diss-code-freeze | 20c6881cabdf1e3ed7a9ddb40bbdcc7a7fd22f78 | [
"Apache-2.0"
] | null | null | null | import atexit
import time
from absl import app, flags
from xain.datasets import load_splits
from xain.helpers import storage
from xain.ops import results
from . import run
FLAGS = flags.FLAGS
def after_main(group_name: str, task_name: str):
"""Will run after main exists (successfully or otherwise)"""
# Push results once task has finished
results.push(group_name=group_name, task_name=task_name)
def main(_):
# Set exit callback
if FLAGS.push_results:
atexit.register(
after_main, group_name=FLAGS.group_name, task_name=FLAGS.task_name
)
# Load data
xy_train_partitions, xy_val, xy_test = load_splits(FLAGS.dataset)
# Execute training
start = time.time()
partition_id = FLAGS.partition_id
num_participants = 1 # For unitary training
hist_opt_configs = None # For unitary training
hist_metrics = None # For unitary training
if partition_id is not None: # Use only a single partition if required (unitary)
hist, loss, acc = run.unitary_training(
model_name=FLAGS.model,
xy_train=xy_train_partitions[partition_id],
xy_val=xy_val,
xy_test=xy_test,
E=FLAGS.E,
B=FLAGS.B,
)
else:
hist, _, hist_opt_configs, hist_metrics, loss, acc = run.federated_training(
model_name=FLAGS.model,
xy_train_partitions=xy_train_partitions,
xy_val=xy_val,
xy_test=xy_test,
R=FLAGS.R,
E=FLAGS.E,
C=FLAGS.C,
B=FLAGS.B,
)
num_participants = len(xy_train_partitions)
end = time.time()
# Write results
res = {
"group_name": FLAGS.group_name,
"task_name": FLAGS.task_name,
"task_label": FLAGS.task_label,
"dataset": FLAGS.dataset,
"model": FLAGS.model,
"R": FLAGS.R,
"E": FLAGS.E,
"C": FLAGS.C,
"B": FLAGS.B,
"partition_id": partition_id,
"start": start,
"end": end,
"duration": end - start,
"loss": float(loss),
"acc": float(acc),
"hist": hist,
"hist_opt_configs": hist_opt_configs,
"hist_metrics": hist_metrics,
"num_participants": num_participants,
}
storage.write_json(res, fname="results.json")
if __name__ == "__main__":
flags.mark_flag_as_required("group_name")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("model")
flags.mark_flag_as_required("dataset")
flags.mark_flag_as_required("R")
flags.mark_flag_as_required("E")
flags.mark_flag_as_required("C")
flags.mark_flag_as_required("B")
# Note: Flag partition_id is not required (i.e. optional)
app.run(main=main)
| 28.489796 | 85 | 0.629656 |
f928ca718b06a18a95ba5d4aa3339f2b3a4f851e | 2,903 | py | Python | util/fetch_currency_codelist.py | Inova-MPRJ/standard | e979b5b8193da70b21cc08034f14bc4950fd389b | [
"Apache-2.0"
] | 1 | 2021-04-01T12:03:24.000Z | 2021-04-01T12:03:24.000Z | util/fetch_currency_codelist.py | Inova-MPRJ/standard | e979b5b8193da70b21cc08034f14bc4950fd389b | [
"Apache-2.0"
] | null | null | null | util/fetch_currency_codelist.py | Inova-MPRJ/standard | e979b5b8193da70b21cc08034f14bc4950fd389b | [
"Apache-2.0"
] | null | null | null | """
Updates the currency codelist from ISO4217 files.
"""
import csv
import os.path
import re
import requests
from lxml import etree
from helper import json_dump, json_load, schema_dir
def get_and_parse_xml(url):
response = requests.get(url)
response.raise_for_status()
return etree.fromstring(response.content)
if __name__ == '__main__':
# "List one: Current currency & funds code list"
# https://www.currency-iso.org/en/home/tables/table-a1.html
current_codes = {}
tree = get_and_parse_xml('https://www.currency-iso.org/dam/downloads/lists/list_one.xml')
for node in tree.xpath('//CcyNtry'):
match = node.xpath('./Ccy')
# Entries like Antarctica have no universal currency.
if match:
code = node.xpath('./Ccy')[0].text
title = node.xpath('./CcyNm')[0].text.strip()
if code not in current_codes:
current_codes[code] = title
# We should expect currency titles to be consistent across countries.
elif current_codes[code] != title:
raise Exception('expected {}, got {}'.format(current_codes[code], title))
# "List three: List of codes for historic denominations of currencies & funds"
# https://www.currency-iso.org/en/home/tables/table-a3.html
historic_codes = {}
tree = get_and_parse_xml('https://www.currency-iso.org/dam/downloads/lists/list_three.xml')
for node in tree.xpath('//HstrcCcyNtry'):
code = node.xpath('./Ccy')[0].text
title = node.xpath('./CcyNm')[0].text.strip()
valid_until = node.xpath('./WthdrwlDt')[0].text
# Use ISO8601 interval notation.
valid_until = re.sub(r'^(\d{4})-(\d{4})$', r'\1/\2', valid_until.replace(' to ', '/'))
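        # e.g. '1999-2002' becomes '1999/2002', while '1989 to 1990' becomes
        # '1989/1990'; single dates such as '1993-07' pass through unchanged.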
if code not in current_codes:
if code not in historic_codes:
historic_codes[code] = {'Title': title, 'Valid Until': valid_until}
# If the code is historical, use the most recent title and valid date.
elif valid_until > historic_codes[code]['Valid Until']:
historic_codes[code] = {'Title': title, 'Valid Until': valid_until}
with open(os.path.join(schema_dir, 'codelists', 'currency.csv'), 'w') as fp:
writer = csv.writer(fp, lineterminator='\n')
writer.writerow(['Code', 'Title', 'Valid Until'])
for code in sorted(current_codes.keys()):
writer.writerow([code, current_codes[code], None])
for code in sorted(historic_codes.keys()):
writer.writerow([code, historic_codes[code]['Title'], historic_codes[code]['Valid Until']])
release_schema = json_load('release-schema.json')
codes = sorted(list(current_codes.keys()) + list(historic_codes.keys()))
release_schema['definitions']['Value']['properties']['currency']['enum'] = codes + [None]
json_dump('release-schema.json', release_schema)
| 42.691176 | 103 | 0.643128 |
71f45bb99b87f56c61c26438172a76aba1880001 | 486 | py | Python | data/scripts/templates/object/tangible/ship/components/shield_generator/shared_shd_prototype_shield.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/ship/components/shield_generator/shared_shd_prototype_shield.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/ship/components/shield_generator/shared_shd_prototype_shield.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/shield_generator/shared_shd_prototype_shield.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","shd_prototype")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 28.588235 | 101 | 0.744856 |
cb165872af7918769292ad036714b4a58207d426 | 3,414 | py | Python | state-change-localization-classification/slowFast-perceiver/models/head_helper.py | EGO4D/hands-and-objects | 76d6ce6af1a9db4007ea24eb315f3f0eaea26bc2 | [
"MIT"
] | 24 | 2021-10-15T20:17:38.000Z | 2022-03-30T18:54:55.000Z | state-change-localization-classification/slowFast-perceiver/models/head_helper.py | EGO4D/hands-and-objects | 76d6ce6af1a9db4007ea24eb315f3f0eaea26bc2 | [
"MIT"
] | 1 | 2022-03-09T03:35:42.000Z | 2022-03-10T20:50:24.000Z | state-change-localization-classification/slowFast-perceiver/models/head_helper.py | EGO4D/hands-and-objects | 76d6ce6af1a9db4007ea24eb315f3f0eaea26bc2 | [
"MIT"
] | 4 | 2021-11-18T19:22:16.000Z | 2022-03-21T02:51:35.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""ResNe(X)t Head helper."""
import torch
import torch.nn as nn
class ResNetBasicHead(nn.Module):
"""
ResNe(X)t 3D head.
This layer performs a fully-connected projection during training, when the
input size is 1x1x1. It performs a convolutional projection during testing
when the input size is larger than 1x1x1. If the inputs are from multiple
different pathways, the inputs will be concatenated after pooling.
"""
def __init__(
self, dim_in, num_classes, pool_size, dropout_rate=0.0, act_func="softmax"
):
"""
The `__init__` method of any subclass should also contain these
arguments.
ResNetBasicHead takes p pathways as input where p in [1, infty].
Args:
dim_in (list): the list of channel dimensions of the p inputs to the
ResNetHead.
num_classes (int): the channel dimensions of the p outputs to the
ResNetHead.
pool_size (list): the list of kernel sizes of p spatial temporal
poolings, temporal pool kernel size, spatial pool kernel size,
spatial pool kernel size in order.
dropout_rate (float): dropout rate. If equal to 0.0, perform no
dropout.
act_func (string): activation function to use. 'softmax': applies
softmax on the output. 'sigmoid': applies sigmoid on the output.
"""
super(ResNetBasicHead, self).__init__()
assert (
len({len(pool_size), len(dim_in)}) == 1
), "pathway dimensions are not consistent."
self.num_pathways = len(pool_size)
for pathway in range(self.num_pathways):
avg_pool = nn.AvgPool3d(pool_size[pathway], stride=1)
self.add_module("pathway{}_avgpool".format(pathway), avg_pool)
if dropout_rate > 0.0:
self.dropout = nn.Dropout(dropout_rate)
# Perform FC in a fully convolutional manner. The FC layer will be
# initialized with a different std comparing to convolutional layers.
self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)
# Softmax for evaluation and testing.
if act_func == "softmax":
self.act = nn.Softmax(dim=4)
elif act_func == "sigmoid":
self.act = nn.Sigmoid()
else:
raise NotImplementedError(
"{} is not supported as an activation" "function.".format(act_func)
)
def forward(self, inputs):
assert (
len(inputs) == self.num_pathways
), "Input tensor does not contain {} pathway".format(self.num_pathways)
pool_out = []
for pathway in range(self.num_pathways):
m = getattr(self, "pathway{}_avgpool".format(pathway))
pool_out.append(m(inputs[pathway]))
x = torch.cat(pool_out, 1)
# (N, C, T, H, W) -> (N, T, H, W, C).
x = x.permute((0, 2, 3, 4, 1))
# Perform dropout.
if hasattr(self, "dropout"):
x = self.dropout(x)
x = self.projection(x)
# Performs fully convlutional inference.
if not self.training:
x = self.act(x)
x = x.mean([1, 2, 3])
x = x.view(x.shape[0], -1)
return x,
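# Usage sketch (dimensions are illustrative, not taken from a real config):
#     head = ResNetBasicHead(dim_in=[2048, 256], num_classes=400,
#                            pool_size=[[8, 7, 7], [32, 7, 7]], dropout_rate=0.5)
#     preds, = head([slow_features, fast_features])  # two (N, C, T, H, W) tensors
# Note the trailing comma above: forward() returns a 1-tuple, so callers unpack it.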
| 38.359551 | 86 | 0.597247 |
3cfc676c08414645598b7ddf6c37fd266ffee369 | 9,246 | py | Python | zerver/views/video_calls.py | mehant-kr/zulip | acd34e27627b227c66c4a3dce64d38563600852f | [
"Apache-2.0"
] | 3 | 2019-02-03T20:46:55.000Z | 2019-03-04T15:44:28.000Z | zerver/views/video_calls.py | mehant-kr/zulip | acd34e27627b227c66c4a3dce64d38563600852f | [
"Apache-2.0"
] | 1 | 2022-01-24T09:38:09.000Z | 2022-01-24T09:38:09.000Z | zerver/views/video_calls.py | mehant-kr/zulip | acd34e27627b227c66c4a3dce64d38563600852f | [
"Apache-2.0"
] | 2 | 2021-07-02T14:15:24.000Z | 2021-08-16T12:31:49.000Z | import hashlib
import json
import random
import secrets
from base64 import b32encode
from functools import partial
from typing import Dict
from urllib.parse import quote, urlencode, urljoin
import requests
from defusedxml import ElementTree
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.middleware import csrf
from django.shortcuts import redirect, render
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.translation import gettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from oauthlib.oauth2 import OAuth2Error
from requests_oauthlib import OAuth2Session
from zerver.decorator import REQ, has_request_variables, zulip_login_required
from zerver.lib.actions import do_set_zoom_token
from zerver.lib.exceptions import ErrorCode, JsonableError
from zerver.lib.pysa import mark_sanitized
from zerver.lib.response import json_success
from zerver.lib.subdomains import get_subdomain
from zerver.lib.url_encoding import add_query_arg_to_redirect_url, add_query_to_redirect_url
from zerver.lib.validator import check_dict, check_string
from zerver.models import UserProfile, get_realm
class InvalidZoomTokenError(JsonableError):
code = ErrorCode.INVALID_ZOOM_TOKEN
def __init__(self) -> None:
super().__init__(_("Invalid Zoom access token"))
def get_zoom_session(user: UserProfile) -> OAuth2Session:
if settings.VIDEO_ZOOM_CLIENT_ID is None:
raise JsonableError(_("Zoom credentials have not been configured"))
return OAuth2Session(
settings.VIDEO_ZOOM_CLIENT_ID,
redirect_uri=urljoin(settings.ROOT_DOMAIN_URI, "/calls/zoom/complete"),
auto_refresh_url="https://zoom.us/oauth/token",
auto_refresh_kwargs={
"client_id": settings.VIDEO_ZOOM_CLIENT_ID,
"client_secret": settings.VIDEO_ZOOM_CLIENT_SECRET,
},
token=user.zoom_token,
token_updater=partial(do_set_zoom_token, user),
)
def get_zoom_sid(request: HttpRequest) -> str:
# This is used to prevent CSRF attacks on the Zoom OAuth
# authentication flow. We want this value to be unpredictable and
# tied to the session, but we don’t want to expose the main CSRF
# token directly to the Zoom server.
csrf.get_token(request)
# Use 'mark_sanitized' to cause Pysa to ignore the flow of user controlled
# data out of this function. 'request.META' is indeed user controlled, but
# post-HMAC output is no longer meaningfully controllable.
return mark_sanitized(
""
if getattr(request, "_dont_enforce_csrf_checks", False)
else salted_hmac("Zulip Zoom sid", request.META["CSRF_COOKIE"]).hexdigest()
)
@zulip_login_required
@never_cache
def register_zoom_user(request: HttpRequest) -> HttpResponse:
oauth = get_zoom_session(request.user)
authorization_url, state = oauth.authorization_url(
"https://zoom.us/oauth/authorize",
state=json.dumps(
{"realm": get_subdomain(request), "sid": get_zoom_sid(request)},
),
)
return redirect(authorization_url)
@never_cache
@has_request_variables
def complete_zoom_user(
request: HttpRequest,
state: Dict[str, str] = REQ(
json_validator=check_dict([("realm", check_string)], value_validator=check_string)
),
) -> HttpResponse:
if get_subdomain(request) != state["realm"]:
return redirect(urljoin(get_realm(state["realm"]).uri, request.get_full_path()))
return complete_zoom_user_in_realm(request)
@zulip_login_required
@has_request_variables
def complete_zoom_user_in_realm(
request: HttpRequest,
code: str = REQ(),
state: Dict[str, str] = REQ(
json_validator=check_dict([("sid", check_string)], value_validator=check_string)
),
) -> HttpResponse:
if not constant_time_compare(state["sid"], get_zoom_sid(request)):
raise JsonableError(_("Invalid Zoom session identifier"))
oauth = get_zoom_session(request.user)
try:
token = oauth.fetch_token(
"https://zoom.us/oauth/token",
code=code,
client_secret=settings.VIDEO_ZOOM_CLIENT_SECRET,
)
except OAuth2Error:
raise JsonableError(_("Invalid Zoom credentials"))
do_set_zoom_token(request.user, token)
return render(request, "zerver/close_window.html")
def make_zoom_video_call(request: HttpRequest, user: UserProfile) -> HttpResponse:
oauth = get_zoom_session(user)
if not oauth.authorized:
raise InvalidZoomTokenError
try:
res = oauth.post("https://api.zoom.us/v2/users/me/meetings", json={})
except OAuth2Error:
do_set_zoom_token(user, None)
raise InvalidZoomTokenError
if res.status_code == 401:
do_set_zoom_token(user, None)
raise InvalidZoomTokenError
elif not res.ok:
raise JsonableError(_("Failed to create Zoom call"))
return json_success({"url": res.json()["join_url"]})
@csrf_exempt
@require_POST
@has_request_variables
def deauthorize_zoom_user(request: HttpRequest) -> HttpResponse:
data = json.loads(request.body)
payload = data["payload"]
if payload["user_data_retention"] == "false":
requests.post(
"https://api.zoom.us/oauth/data/compliance",
json={
"client_id": settings.VIDEO_ZOOM_CLIENT_ID,
"user_id": payload["user_id"],
"account_id": payload["account_id"],
"deauthorization_event_received": payload,
"compliance_completed": True,
},
auth=(settings.VIDEO_ZOOM_CLIENT_ID, settings.VIDEO_ZOOM_CLIENT_SECRET),
).raise_for_status()
return json_success()
def get_bigbluebutton_url(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
# https://docs.bigbluebutton.org/dev/api.html#create for reference on the API calls
# https://docs.bigbluebutton.org/dev/api.html#usage for reference for checksum
id = "zulip-" + str(random.randint(100000000000, 999999999999))
password = b32encode(secrets.token_bytes(7))[:10].decode()
checksum = hashlib.sha1(
(
"create"
+ "meetingID="
+ id
+ "&moderatorPW="
+ password
+ "&attendeePW="
+ password
+ "a"
+ settings.BIG_BLUE_BUTTON_SECRET
).encode()
).hexdigest()
url = add_query_to_redirect_url(
"/calls/bigbluebutton/join",
urlencode(
{
"meeting_id": id,
"password": password,
"checksum": checksum,
}
),
)
return json_success({"url": url})
# We use zulip_login_required here mainly to get access to the user's
# full name from Zulip to prepopulate the user's name in the
# BigBlueButton meeting. Since the meeting's details are encoded in
# the link the user is clicking, there is no validation tying this
# meeting to the Zulip organization it was created in.
@zulip_login_required
@never_cache
@has_request_variables
def join_bigbluebutton(
request: HttpRequest,
meeting_id: str = REQ(),
password: str = REQ(),
checksum: str = REQ(),
) -> HttpResponse:
if settings.BIG_BLUE_BUTTON_URL is None or settings.BIG_BLUE_BUTTON_SECRET is None:
raise JsonableError(_("BigBlueButton is not configured."))
else:
try:
response = requests.get(
add_query_to_redirect_url(
settings.BIG_BLUE_BUTTON_URL + "api/create",
urlencode(
{
"meetingID": meeting_id,
"moderatorPW": password,
"attendeePW": password + "a",
"checksum": checksum,
}
),
)
)
response.raise_for_status()
except requests.RequestException:
raise JsonableError(_("Error connecting to the BigBlueButton server."))
payload = ElementTree.fromstring(response.text)
if payload.find("messageKey").text == "checksumError":
raise JsonableError(_("Error authenticating to the BigBlueButton server."))
if payload.find("returncode").text != "SUCCESS":
raise JsonableError(_("BigBlueButton server returned an unexpected error."))
join_params = urlencode( # type: ignore[type-var] # https://github.com/python/typeshed/issues/4234
{
"meetingID": meeting_id,
"password": password,
"fullName": request.user.full_name,
},
quote_via=quote,
)
checksum = hashlib.sha1(
("join" + join_params + settings.BIG_BLUE_BUTTON_SECRET).encode()
).hexdigest()
redirect_url_base = add_query_to_redirect_url(
settings.BIG_BLUE_BUTTON_URL + "api/join", join_params
)
return redirect(add_query_arg_to_redirect_url(redirect_url_base, "checksum=" + checksum))
| 35.837209 | 107 | 0.670452 |
085a4496e8598abff2b4734288d57e98290543ad | 158,563 | py | Python | pyvttbl/stats/_stats.py | yk/pyvttbl | af66c1aba410ba5386249cd5b95f2ae0ed01d870 | [
"BSD-3-Clause"
] | null | null | null | pyvttbl/stats/_stats.py | yk/pyvttbl | af66c1aba410ba5386249cd5b95f2ae0ed01d870 | [
"BSD-3-Clause"
] | null | null | null | pyvttbl/stats/_stats.py | yk/pyvttbl | af66c1aba410ba5386249cd5b95f2ae0ed01d870 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 1999-2007 Gary Strangman; All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Comments and/or additions are welcome (send e-mail to:
# strang@nmr.mgh.harvard.edu).
#
"""
stats.py module
(Requires _pstat.py module.)
#################################################
####### Written by: Gary Strangman ###########
####### Last modified: Dec 18, 2007 ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
IMPORTANT: There are really *3* sets of functions. The first set has an 'l'
prefix, which can be used with list or tuple arguments. The second set has
an 'a' prefix, which can accept NumPy array arguments. These latter
functions are defined only when NumPy is available on the system. The third
type has NO prefix (i.e., has the name that appears below). Functions of
this set are members of a "Dispatch" class, c/o David Ascher. This class
allows different functions to be called depending on the type of the passed
arguments. Thus, stats.mean is a member of the Dispatch class and
stats.mean(range(20)) will call stats.lmean(range(20)) while
stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)).
This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implemented the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
while "print stats.mean.__doc__" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful
CENTRAL TENDENCY: geometricmean
harmonicmean
mean
median
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
skewtest (for Numpy arrays only)
kurtosistest (for Numpy arrays only)
normaltest (for Numpy arrays only)
ALTERED VERSIONS: tmean (for Numpy arrays only)
tvar (for Numpy arrays only)
tmin (for Numpy arrays only)
tmax (for Numpy arrays only)
tstdev (for Numpy arrays only)
tsem (for Numpy arrays only)
describe
FREQUENCY STATS: itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
samplevar
samplestdev
signaltonoise (for Numpy arrays only)
var
stdev
sterr
sem
z
zs
zmap (for Numpy arrays only)
TRIMMING FCNS: threshold (for Numpy arrays only)
trimboth
trim1
round (round all vals to 'n' decimals; Numpy only)
CORRELATION FCNS: covariance (for Numpy arrays only)
correlation (for Numpy arrays only)
paired
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxont
kruskalwallish
friedmanchisquare
PROBABILITY CALCS: chisqprob
erfcc
zprob
ksprob
fprob
betacf
gammln
betai
ANOVA FUNCTIONS: F_oneway
F_value
SUPPORT FUNCTIONS: writecc
incr
sign (for Numpy arrays only)
sum
cumsum
ss
summult
sumdiffsquared
square_of_sums
shellsort
rankdata
outputpairedstats
findwithin
"""
## CHANGE LOG:
## ===========
## 07-11.26 ... conversion for numpy started
## 07-05-16 ... added Lin's Concordance Correlation Coefficient (alincc) and acov
## 05-08-21 ... added "Dice's coefficient"
## 04-10-26 ... added ap2t(), an ugly fcn for converting p-vals to T-vals
## 04-04-03 ... added amasslinregress() function to do regression on N-D arrays
## 03-01-03 ... CHANGED VERSION TO 0.6
## fixed atsem() to properly handle limits=None case
## improved histogram and median functions (estbinwidth) and
## fixed atvar() function (wrong answers for neg numbers?!?)
## 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows
## 02-05-10 ... fixed lchisqprob indentation (failed when df=even)
## 00-12-28 ... removed aanova() to separate module, fixed licensing to
## match Python License, fixed doc string & imports
## 00-04-13 ... pulled all "global" statements, except from aanova()
## added/fixed lots of documentation, removed io.py dependency
## changed to version 0.5
## 99-11-13 ... added asign() function
## 99-11-01 ... changed version to 0.4 ... enough incremental changes now
## 99-10-25 ... added acovariance and acorrelation functions
## 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors
## added aglm function (crude, but will be improved)
## 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, avar, etc. to
## all handle lists of 'dimension's and keepdims
## REMOVED ar0, ar2, ar3, ar4 and replaced them with around
## reinserted fixes for abetai to avoid math overflows
## 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to
## handle multi-dimensional arrays (whew!)
## 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990)
## added anormaltest per same reference
## re-wrote azprob to calc arrays of probs all at once
## 99-08-22 ... edited attest_ind printing section so arrays could be rounded
## 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on
## short/byte arrays (mean of #s btw 100-300 = -150??)
## 99-08-09 ... fixed asum so that the None case works for Byte arrays
## 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays
## 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap)
## 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0])
## 04/11/99 ... added asignaltonoise, athreshold functions, changed all
## max/min in array section to N.maximum/N.minimum,
## fixed square_of_sums to prevent integer overflow
## 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums
## 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions
## 02/28/99 ... Fixed aobrientransform to return an array rather than a list
## 01/15/99 ... Essentially ceased updating list-versions of functions (!!!)
## 01/13/99 ... CHANGED TO VERSION 0.3
## fixed bug in a/lmannwhitneyu p-value calculation
## 12/31/98 ... fixed variable-name bug in ldescribe
## 12/19/98 ... fixed bug in findwithin (fcns needed _pstat. prefix)
## 12/16/98 ... changed amedianscore to return float (not array) for 1 score
## 12/14/98 ... added atmin and atmax functions
## removed umath from import line (not needed)
## l/ageometricmean modified to reduce chance of overflows (take
## nth root first, then multiply)
## 12/07/98 ... added __version__variable (now 0.2)
## removed all 'stats.' from anova() fcn
## 12/06/98 ... changed those functions (except shellsort) that altered
## arguments in-place ... cumsum, ranksort, ...
## updated (and fixed some) doc-strings
## 12/01/98 ... added anova() function (requires NumPy)
## incorporated Dispatch class
## 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean
## added 'asum' function (added functionality to N.add.reduce)
## fixed both moment and amoment (two errors)
## changed name of skewness and askewness to skew and askew
## fixed (a)histogram (which sometimes counted points <lowerlimit)
from . import _pstat            # companion pstat module, bundled with this package
import math, string, copy # required python modules
from types import *
__version__ = 0.6
############# DISPATCH CODE ##############
class Dispatch:
"""
The Dispatch class, care of David Ascher, allows different functions to
be called depending on the argument types. This way, there can be one
function name regardless of the argument type. To access function doc
in stats.py module, prefix the function with an 'l' or 'a' for list or
array arguments, respectively. That is, print stats.lmean.__doc__ or
print stats.amean.__doc__ or whatever.
"""
def __init__(self, *tuples):
self._dispatch = {}
for func, types in tuples:
for t in types:
if t in list(self._dispatch.keys()):
raise ValueError("can't have two dispatches on "+str(t))
self._dispatch[t] = func
self._types = list(self._dispatch.keys())
def __call__(self, arg1, *args, **kw):
if type(arg1) not in self._types:
raise TypeError("don't know how to dispatch %s arguments" % type(arg1))
return self._dispatch[type(arg1)](*(arg1,) + args, **kw)
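# A minimal usage sketch (the real dispatch tables are built near the bottom
# of this module, so the exact type tuples below are only illustrative):
#     mean = Dispatch((lmean, (list, tuple)))
#     mean([0, 1, 2, 3, 4])   # dispatches to lmean and returns 2.0
#     mean(range(5))          # raises TypeError: range is not a registered type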
##########################################################################
######################## LIST-BASED FUNCTIONS ########################
##########################################################################
### Define these regardless
####################################
####### CENTRAL TENDENCY #########
####################################
def lgeometricmean (inlist):
"""
Calculates the geometric mean of the values in the passed list.
That is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list.
Usage: lgeometricmean(inlist)
"""
mult = 1.0
one_over_n = 1.0/len(inlist)
for item in inlist:
mult = mult * pow(item,one_over_n)
return mult
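# Illustration: lgeometricmean([2, 8]) multiplies 2**0.5 by 8**0.5, i.e. the
# square root of 16, and returns 4.0 (up to floating-point rounding).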
def lharmonicmean (inlist):
"""
Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + 1.0/item
return len(inlist) / sum
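# Illustration: lharmonicmean([1, 2, 4]) returns 3 / (1/1 + 1/2 + 1/4),
# i.e. 3 / 1.75, which is about 1.714.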
def lmean (inlist):
"""
Returns the arithematic mean of the values in the passed list.
Assumes a '1D' list, but will function on the 1st dim of an array(!).
Usage: lmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + item
return sum/float(len(inlist))
def lmedian (inlist,numbins=1000):
"""
Returns the computed median value of a list of numbers, given the
number of bins to use for the histogram (more bins brings the computed value
closer to the median score, default number of bins = 1000). See G.W.
Heiman's Basic Stats (1st Edition), or CRC Probability & Statistics.
Usage: lmedian (inlist, numbins=1000)
"""
(hist, smallest, binsize, extras) = histogram(inlist,numbins,[min(inlist),max(inlist)]) # make histog
cumhist = cumsum(hist) # make cumulative histogram
for i in range(len(cumhist)): # get 1st(!) index holding 50%ile score
if cumhist[i]>=len(inlist)/2.0:
cfbin = i
break
    LRL = smallest + binsize*cfbin        # get lower real limit of that bin
cfbelow = cumhist[cfbin-1]
freq = float(hist[cfbin]) # frequency IN the 50%ile bin
median = LRL + ((len(inlist)/2.0 - cfbelow)/float(freq))*binsize # median formula
return median
def lmedianscore (inlist):
"""
Returns the 'middle' score of the passed list. If there is an even
number of scores, the mean of the 2 middle scores is returned.
Usage: lmedianscore(inlist)
"""
newlist = copy.deepcopy(inlist)
newlist.sort()
if len(newlist) % 2 == 0: # if even number of scores, average middle 2
        index = len(newlist) // 2  # floor division; plain '/' would give a float index in Python 3
        median = float(newlist[index] + newlist[index-1]) / 2
    else:
        index = len(newlist) // 2  # floor division gives the mid value counting from 0
median = newlist[index]
return median
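# Illustration: lmedianscore([1, 3, 2, 4]) sorts to [1, 2, 3, 4] and, with an
# even count, averages the two middle scores to give 2.5.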
def lmode(inlist):
"""
Returns a list of the modal (most common) score(s) in the passed
list. If there is more than one such score, all are returned. The
bin-count for the mode(s) is also returned.
Usage: lmode(inlist)
Returns: bin-count for mode(s), a list of modal value(s)
"""
scores = _pstat.unique(inlist)
scores.sort()
freq = []
for item in scores:
freq.append(inlist.count(item))
maxfreq = max(freq)
mode = []
stillmore = 1
while stillmore:
try:
indx = freq.index(maxfreq)
mode.append(scores[indx])
del freq[indx]
del scores[indx]
except ValueError:
stillmore=0
return maxfreq, mode
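# Illustration: lmode([1, 2, 2, 3, 3]) returns (2, [2, 3]) -- both 2 and 3
# occur twice, so both are reported as modes with a bin-count of 2.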
####################################
############ MOMENTS #############
####################################
def lmoment(inlist,moment=1):
"""
Calculates the nth moment about the mean for a sample (defaults to
the 1st moment). Used to calculate coefficients of skewness and kurtosis.
Usage: lmoment(inlist,moment=1)
Returns: appropriate moment (r) from ... 1/n * SUM((inlist(i)-mean)**r)
"""
if moment == 1:
return 0.0
else:
mn = mean(inlist)
n = len(inlist)
s = 0
for x in inlist:
s = s + (x-mn)**moment
return s/float(n)
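# Illustration: lmoment([1, 2, 3], moment=2) is the mean squared deviation
# about the mean: ((1-2)**2 + (2-2)**2 + (3-2)**2) / 3 == 2/3.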
def lvariation(inlist):
"""
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6.
Usage: lvariation(inlist)
"""
return 100.0*samplestdev(inlist)/float(mean(inlist))
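# Illustration: for [2, 4, 4, 4, 5, 5, 7, 9] the mean is 5 and the sample
# standard deviation (N denominator) is 2, so lvariation returns 100*2/5 == 40.0.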
def lskew(inlist):
"""
Returns the skewness of a distribution, as defined in Numerical
Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)
Usage: lskew(inlist)
"""
return moment(inlist,3)/pow(moment(inlist,2),1.5)
def lkurtosis(inlist):
"""
Returns the kurtosis of a distribution, as defined in Numerical
Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)
Usage: lkurtosis(inlist)
"""
return moment(inlist,4)/pow(moment(inlist,2),2.0)
def ldescribe(inlist):
"""
Returns some descriptive statistics of the passed list (assumed to be 1D).
Usage: ldescribe(inlist)
Returns: n, mean, standard deviation, skew, kurtosis
"""
n = len(inlist)
mm = (min(inlist),max(inlist))
m = mean(inlist)
sd = stdev(inlist)
sk = skew(inlist)
kurt = kurtosis(inlist)
return n, mm, m, sd, sk, kurt
####################################
####### FREQUENCY STATS ##########
####################################
def litemfreq(inlist):
"""
Returns a list of pairs. Each pair consists of one of the scores in inlist
and it's frequency count. Assumes a 1D list is passed.
Usage: litemfreq(inlist)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
scores = _pstat.unique(inlist)
scores.sort()
freq = []
for item in scores:
freq.append(inlist.count(item))
return _pstat.abut(scores, freq)
def lscoreatpercentile (inlist, percent):
"""
Returns the score at a given percentile relative to the distribution
given by inlist.
Usage: lscoreatpercentile(inlist,percent)
"""
if percent > 1:
print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
percent = percent / 100.0
targetcf = percent*len(inlist)
h, lrl, binsize, extras = histogram(inlist)
cumhist = cumsum(copy.deepcopy(h))
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
return score
def lpercentileofscore (inlist, score,histbins=10,defaultlimits=None):
"""
Returns the percentile value of a score relative to the distribution
given by inlist. Formula depends on the values used to histogram the data(!).
Usage: lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
"""
h, lrl, binsize, extras = histogram(inlist,histbins,defaultlimits)
cumhist = cumsum(copy.deepcopy(h))
i = int((score - lrl)/float(binsize))
pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inlist)) * 100
return pct
def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0):
"""
Returns (i) a list of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. If no sequence object
is given for defaultreallimits, the routine picks (usually non-pretty) bins
spanning all the numbers in the inlist.
Usage:   lhistogram (inlist, numbins=10, defaultreallimits=None, printextras=0)
Returns: list of bin values, lowerreallimit, binsize, extrapoints
"""
if (defaultreallimits != None):
if type(defaultreallimits) not in [list,tuple] or len(defaultreallimits)==1: # only one limit given, assumed to be lower one & upper is calc'd
lowerreallimit = defaultreallimits
upperreallimit = 1.000001 * max(inlist)
else: # assume both limits given
lowerreallimit = defaultreallimits[0]
upperreallimit = defaultreallimits[1]
binsize = (upperreallimit-lowerreallimit)/float(numbins)
else: # no limits given for histogram, both must be calc'd
estbinwidth=(max(inlist)-min(inlist))/float(numbins) +1e-6 #1=>cover all
binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)
lowerreallimit = min(inlist) - binsize/2 #lower real limit,1st bin
bins = [0]*(numbins)
extrapoints = 0
for num in inlist:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit)/float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except:
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =',extrapoints)
return (bins, lowerreallimit, binsize, extrapoints)
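# Illustration: lhistogram([1, 2, 2, 3], numbins=3, defaultreallimits=(1, 4))
# uses binsize (4-1)/3 == 1.0 and returns ([1, 2, 1], 1, 1.0, 0): one score in
# [1,2), two in [2,3), one in [3,4), and no extra points.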
def lcumfreq(inlist,numbins=10,defaultreallimits=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
Usage: lcumfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h,l,b,e = histogram(inlist,numbins,defaultreallimits)
cumhist = cumsum(copy.deepcopy(h))
return cumhist,l,b,e
def lrelfreq(inlist,numbins=10,defaultreallimits=None):
"""
Returns a relative frequency histogram, using the histogram function.
Usage: lrelfreq(inlist,numbins=10,defaultreallimits=None)
Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h,l,b,e = histogram(inlist,numbins,defaultreallimits)
for i in range(len(h)):
h[i] = h[i]/float(len(inlist))
return h,l,b,e
####################################
##### VARIABILITY FUNCTIONS ######
####################################
def lobrientransform(args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. From
Maxwell and Delaney, p.112.
Usage:   lobrientransform(args), where args is a list of lists (one list of scores per group)
Returns: transformed data for use in an ANOVA
"""
TINY = 1e-10
k = len(args)
n = [0.0]*k
v = [0.0]*k
m = [0.0]*k
nargs = []
for i in range(k):
nargs.append(copy.deepcopy(args[i]))
n[i] = float(len(nargs[i]))
v[i] = var(nargs[i])
m[i] = mean(nargs[i])
for j in range(k):
for i in range(int(n[j])):
t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
t2 = 0.5*v[j]*(n[j]-1.0)
t3 = (n[j]-1.0)*(n[j]-2.0)
nargs[j][i] = (t1-t2) / float(t3)
check = 1
for j in range(k):
if v[j] - mean(nargs[j]) > TINY:
check = 0
if check != 1:
raise ValueError('Problem in obrientransform.')
else:
return nargs
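# Usage sketch: with two groups of scores, something like
#     r = lobrientransform([group1, group2])
# yields the transformed scores; running a one-way ANOVA (e.g. F_oneway) on
# r[0] versus r[1] then tests for homogeneity of variance, as in lpaired below.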
def lsamplevar (inlist):
"""
Returns the variance of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample variance only).
Usage: lsamplevar(inlist)
"""
n = len(inlist)
mn = mean(inlist)
deviations = []
for item in inlist:
deviations.append(item-mn)
return ss(deviations)/float(n)
def lsamplestdev (inlist):
"""
Returns the standard deviation of the values in the passed list using
N for the denominator (i.e., DESCRIBES the sample stdev only).
Usage: lsamplestdev(inlist)
"""
return math.sqrt(samplevar(inlist))
def lcov (x,y, keepdims=0):
"""
Returns the estimated covariance of the paired values in the two passed
lists, using N-1 in the denominator.  The keepdims argument is accepted only
for call-signature compatibility with the array version and is ignored here.
Usage:   lcov(x,y,keepdims=0)
"""
n = len(x)
xmn = mean(x)
ymn = mean(y)
xdeviations = [0]*len(x)
ydeviations = [0]*len(y)
for i in range(len(x)):
xdeviations[i] = x[i] - xmn
ydeviations[i] = y[i] - ymn
ss = 0.0
for i in range(len(xdeviations)):
ss = ss + xdeviations[i]*ydeviations[i]
return ss/float(n-1)
def lvar (inlist):
"""
Returns the variance of the values in the passed list using N-1
for the denominator (i.e., for estimating population variance).
Usage: lvar(inlist)
"""
n = len(inlist)
mn = mean(inlist)
deviations = [0]*len(inlist)
for i in range(len(inlist)):
deviations[i] = inlist[i] - mn
return ss(deviations)/float(n-1)
def lstdev (inlist):
"""
Returns the standard deviation of the values in the passed list
using N-1 in the denominator (i.e., to estimate population stdev).
Usage: lstdev(inlist)
"""
return math.sqrt(var(inlist))
def lsterr(inlist):
"""
Returns the standard error of the values in the passed list using N-1
in the denominator (i.e., to estimate population standard error).
Usage: lsterr(inlist)
"""
return stdev(inlist) / float(math.sqrt(len(inlist)))
def lsem (inlist):
"""
Returns the estimated standard error of the mean (sx-bar) of the
values in the passed list. sem = stdev / sqrt(n)
Usage: lsem(inlist)
"""
sd = stdev(inlist)
n = len(inlist)
return sd/math.sqrt(n)
def lz (inlist, score):
"""
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
"""
z = (score-mean(inlist))/samplestdev(inlist)
return z
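# Illustration: for [2, 4, 4, 4, 5, 5, 7, 9] (mean 5, sample stdev 2),
# lz(inlist, 9) returns (9 - 5) / 2 == 2.0.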
def lzs (inlist):
"""
Returns a list of z-scores, one for each score in the passed list.
Usage: lzs(inlist)
"""
zscores = []
for item in inlist:
zscores.append(z(inlist,item))
return zscores
####################################
####### TRIMMING FUNCTIONS #######
####################################
def ltrimboth (l,proportiontocut):
"""
Slices off the passed proportion of items from BOTH ends of the passed
list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
10% of scores. Assumes list is sorted by magnitude. Slices off LESS if
proportion results in a non-integer slice index (i.e., conservatively
slices off proportiontocut).
Usage: ltrimboth (l,proportiontocut)
Returns: trimmed version of list l
"""
lowercut = int(proportiontocut*len(l))
uppercut = len(l) - lowercut
return l[lowercut:uppercut]
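# Illustration: ltrimboth(list(range(10)), 0.2) computes lowercut == 2 and
# uppercut == 8, returning [2, 3, 4, 5, 6, 7].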
def ltrim1 (l,proportiontocut,tail='right'):
"""
Slices off the passed proportion of items from ONE end of the passed
list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores). Slices off LESS if proportion results in a non-integer
slice index (i.e., conservatively slices off proportiontocut).
Usage: ltrim1 (l,proportiontocut,tail='right') or set tail='left'
Returns: trimmed version of list l
"""
if tail == 'right':
lowercut = 0
uppercut = len(l) - int(proportiontocut*len(l))
elif tail == 'left':
lowercut = int(proportiontocut*len(l))
uppercut = len(l)
return l[lowercut:uppercut]
####################################
##### CORRELATION FUNCTIONS ######
####################################
def lpaired(x,y):
"""
Interactively determines the type of data and then runs the
appropriate statistic for paired group data.
Usage: lpaired(x,y)
Returns: appropriate statistic name, value, and probability
"""
samples = ''
while samples not in ['i','r','I','R','c','C']:
print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
samples = input()
if samples in ['i','I','r','R']:
print('\nComparing variances ...', end=' ')
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
        r = obrientransform([x,y])   # lobrientransform expects a single list of lists
        f,p = F_oneway(r[0],r[1])
if p<0.05:
vartype='unequal, p='+str(round(p,4))
else:
vartype='equal'
print(vartype)
if samples in ['i','I']:
if vartype[0]=='e':
t,p = ttest_ind(x,y,0)
print('\nIndependent samples t-test: ', round(t,4),round(p,4))
else:
if len(x)>20 or len(y)>20:
z,p = ranksums(x,y)
print('\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4))
else:
u,p = mannwhitneyu(x,y)
print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4))
else: # RELATED SAMPLES
if vartype[0]=='e':
t,p = ttest_rel(x,y,0)
print('\nRelated samples t-test: ', round(t,4),round(p,4))
else:
t,p = ranksums(x,y)
print('\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4))
else: # CORRELATION ANALYSIS
corrtype = ''
while corrtype not in ['c','C','r','R','d','D']:
print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
corrtype = input()
if corrtype in ['c','C']:
m,b,r,p,see = linregress(x,y)
print('\nLinear regression for continuous variables ...')
lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
_pstat.printcc(lol)
elif corrtype in ['r','R']:
r,p = spearmanr(x,y)
print('\nCorrelation for ranked variables ...')
print("Spearman's r: ",round(r,4),round(p,4))
else: # DICHOTOMOUS
r,p = pointbiserialr(x,y)
print('\nAssuming x contains a dichotomous variable ...')
print('Point Biserial r: ',round(r,4),round(p,4))
print('\n\n')
return None
def lpearsonr(x,y):
"""
Calculates a Pearson correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (2nd), p.195.
Usage: lpearsonr(x,y) where x and y are equal-length lists
Returns: Pearson's r value, two-tailed p-value
"""
TINY = 1.0e-30
if len(x) != len(y):
raise ValueError('Input values not paired in pearsonr. Aborting.')
n = len(x)
x = list(map(float,x))
y = list(map(float,y))
xmean = mean(x)
ymean = mean(y)
r_num = n*(summult(x,y)) - sum(x)*sum(y)
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = (r_num / r_den) # denominator already a float
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df,0.5,df/float(df+t*t))
return r, prob
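# Illustration: lpearsonr([1, 2, 3, 4, 5], [2, 4, 6, 8, 10]) returns an r of
# 1.0 (a perfect linear relation) with a two-tailed p that is essentially 0;
# the TINY constant keeps the t computation from dividing by zero.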
def llincc(x,y):
"""
Calculates Lin's concordance correlation coefficient.
Usage:   llincc(x,y) where x, y are equal-length lists
Returns: Lin's CC
"""
covar = lcov(x,y)*(len(x)-1)/float(len(x)) # correct denom to n
xvar = lvar(x)*(len(x)-1)/float(len(x)) # correct denom to n
yvar = lvar(y)*(len(y)-1)/float(len(y)) # correct denom to n
    lincc = (2 * covar) / ((xvar+yvar) + ((mean(x)-mean(y))**2))
return lincc
def lspearmanr(x,y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: lspearmanr(x,y) where x and y are equal-length lists
Returns: Spearman's r, two-tailed p-value
"""
TINY = 1e-30
if len(x) != len(y):
raise ValueError('Input values not paired in spearmanr. Aborting.')
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = sumdiffsquared(rankx,ranky)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = betai(0.5*df,0.5,df/(df+t*t)) # t already a float
# probability values for rs are from part 2 of the spearman function in
    # Numerical Recipes, p.510.  They are close to tables, but not exact. (?)
return rs, probrs
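# Illustration: lspearmanr([1, 2, 3], [3, 1, 2]) ranks both lists, gets a
# summed squared rank difference of 6, and returns rs = 1 - 6*6/(3*8) == -0.5.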
def lpointbiserialr(x,y):
"""
Calculates a point-biserial correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (1st), p.194.
Usage: lpointbiserialr(x,y) where x,y are equal-length lists
Returns: Point-biserial r, two-tailed p-value
"""
TINY = 1e-30
if len(x) != len(y):
raise ValueError('INPUT VALUES NOT PAIRED IN pointbiserialr. ABORTING.')
data = _pstat.abut(x,y)
categories = _pstat.unique(x)
if len(categories) != 2:
raise ValueError("Exactly 2 categories required for pointbiserialr().")
else: # there are 2 categories, continue
codemap = _pstat.abut(categories,list(range(2)))
recoded = _pstat.recode(data,codemap,0)
x = _pstat.linexand(data,0,categories[0])
y = _pstat.linexand(data,0,categories[1])
xmean = mean(_pstat.colex(x,1))
ymean = mean(_pstat.colex(y,1))
n = len(data)
adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
rpb = (ymean - xmean)/samplestdev(_pstat.colex(data,1))*adjust
df = n-2
t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
prob = betai(0.5*df,0.5,df/(df+t*t)) # t already a float
return rpb, prob
def lkendalltau(x,y):
"""
Calculates Kendall's tau ... correlation of ordinal data. Adapted
from function kendl1 in Numerical Recipes.  Needs good test-routine.@@@
Usage: lkendalltau(x,y)
Returns: Kendall's tau, two-tailed p-value
"""
n1 = 0
n2 = 0
iss = 0
for j in range(len(x)-1):
        for k in range(j+1,len(y)):  # distinct pairs only; starting at j would count each point against itself as a spurious tie
a1 = x[j] - x[k]
a2 = y[j] - y[k]
aa = a1 * a2
if (aa): # neither list has a tie
n1 = n1 + 1
n2 = n2 + 1
if aa > 0:
iss = iss + 1
else:
iss = iss -1
else:
if (a1):
n1 = n1 + 1
else:
n2 = n2 + 1
tau = iss / math.sqrt(n1*n2)
svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
z = tau / math.sqrt(svar)
prob = erfcc(abs(z)/1.4142136)
return tau, prob
def llinregress(x,y):
"""
Calculates a regression line on x,y pairs.
Usage: llinregress(x,y) x,y are equal-length lists of x-y coordinates
Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
"""
TINY = 1.0e-20
if len(x) != len(y):
raise ValueError('Input values not paired in linregress. Aborting.')
n = len(x)
x = list(map(float,x))
y = list(map(float,y))
xmean = mean(x)
ymean = mean(y)
r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = r_num / r_den
z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df,0.5,df/(df+t*t))
slope = r_num / float(n*ss(x) - square_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*samplestdev(y)
return slope, intercept, r, prob, sterrest
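# Illustration: llinregress([0, 1, 2], [1, 3, 5]) recovers slope 2.0,
# intercept 1.0, r == 1.0, and a standard error of estimate of 0.0.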
####################################
##### INFERENTIAL STATISTICS #####
####################################
def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
"""
Calculates the t-obtained for the independent samples T-test on ONE group
of scores a, given a population mean. If printit=1, results are printed
to the screen. If printit='filename', the results are output to 'filename'
using the given writemode (default=append).
Usage:   lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a')
Returns: t-value, two-tailed prob, n, df, sample mean, sample variance
"""
x = mean(a)
v = var(a)
n = len(a)
df = n-1
svar = ((n-1)*v)/float(df)
t = (x-popmean)/math.sqrt(svar*(1.0/n))
prob = betai(0.5*df,0.5,float(df)/(df+t*t))
if printit != 0:
statname = 'Single-sample T-test.'
outputpairedstats(printit,writemode,
'Population','--',popmean,0,0,0,
name,n,x,v,min(a),max(a),
statname,t,prob)
return t, prob, n, df, x, v
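# Illustration: lttest_1samp([1, 2, 3, 4, 5], 0) tests the sample mean 3
# against 0 with variance 2.5 and n 5, giving t == 3/math.sqrt(0.5), about 4.243.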
def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO INDEPENDENT samples of
scores a, and b.  From Numerical Recipes, p.483.  If printit=1, results
are printed to the screen.  If printit='filename', the results are output
to 'filename' using the given writemode (default=append).
Usage:   lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed prob, n1, n2, df, mean1, mean2, var1, var2, pooled var
"""
x1 = mean(a)
x2 = mean(b)
v1 = stdev(a)**2
v2 = stdev(b)**2
n1 = len(a)
n2 = len(b)
df = n1+n2-2
svar = ((n1-1)*v1+(n2-1)*v2)/float(df)
t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))
prob = betai(0.5*df,0.5,df/(df+t*t))
if printit != 0:
statname = 'Independent samples T-test.'
outputpairedstats(printit,writemode,
name1,n1,x1,v1,min(a),max(a),
name2,n2,x2,v2,min(b),max(b),
statname,t,prob)
return t, prob, n1, n2, df, x1, x2, v1, v2, svar
def ttest_ind_uneq (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
"""
Calculates the t-obtained T-test on TWO INDEPENDENT samples of
scores a, and b, assuming unequal variances.  Degrees of freedom are adjusted
based on the Welch-Satterthwaite equation.  If printit='filename', the
results are output to 'filename' using the given writemode (default=append).
Usage:   ttest_ind_uneq(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed prob, n1, n2, df, mean1, mean2, var1, var2
"""
x1 = mean(a)
x2 = mean(b)
v1 = stdev(a)**2
v2 = stdev(b)**2
n1 = len(a)
n2 = len(b)
t = (x1-x2)/math.sqrt(v1/n1 + v2/n2)
df = (v1/n1 + v2/n2)**2
df /= ((v1/n1)**2/(n1-1.) +(v2/n2)**2/(n2-1.))
prob = betai(0.5*df,0.5,df/(df+t*t))
if printit != 0:
statname = 'Independent samples T-test unequal variance.'
outputpairedstats(printit,writemode,
name1,n1,x1,v1,min(a),max(a),
name2,n2,x2,v2,min(b),max(b),
statname,t,prob)
return t, prob, n1, n2, df, x1, x2, v1, v2
def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):
"""
Calculates the t-obtained T-test on TWO RELATED samples of scores,
a and b.  From Numerical Recipes, p.483.  If printit=1, results are
printed to the screen.  If printit='filename', the results are output to
'filename' using the given writemode (default=append).
Usage:   lttest_rel(a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a')
Returns: t-value, two-tailed prob, n, df, mean1, mean2, var1, var2
"""
if len(a)!=len(b):
raise ValueError('Unequal length lists in ttest_rel.')
x1 = mean(a)
x2 = mean(b)
v1 = var(a)
v2 = var(b)
n = len(a)
cov = 0
for i in range(len(a)):
cov = cov + (a[i]-x1) * (b[i]-x2)
df = n-1
cov = cov / float(df)
sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))
t = (x1-x2)/sd
prob = betai(0.5*df,0.5,df/(df+t*t))
if printit != 0:
statname = 'Related samples T-test.'
outputpairedstats(printit,writemode,
name1,n,x1,v1,min(a),max(a),
name2,n,x2,v2,min(b),max(b),
statname,t,prob)
return t, prob, n, df, x1, x2, v1, v2
def lchisquare(f_obs, f_exp=None, df=None):
"""
Calculates a one-way Pearson chi-square for a list of observed frequencies
and returns the result.  If no expected frequencies are given, the total N
is assumed to be equally distributed across all groups.
Usage:   lchisquare(f_obs, f_exp=None, df=None)   f_obs = list of observed cell freq.
Returns: chisquare-statistic, associated p-value, df, expected frequencies
"""
k = len(f_obs) # number of groups
if f_exp == None:
f_exp = [sum(f_obs)/float(k)] * len(f_obs) # create k bins with = freq.
chisq = 0
for i in range(len(f_obs)):
chisq += (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
if df == None:
df = k-1
return chisq, chisqprob(chisq, df), df, f_exp
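# Illustrative usage (added sketch, not part of the original module): for 60
# rolls of a fair die the expected count is 10 per face, which is exactly
# what the default f_exp=None assumes:
#     chisq, p = lchisquare([8, 12, 9, 11, 10, 10])[0:2]   # df defaults to k-1=5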
def llnchisquare(f_obs,f_exp=None, df=None):
"""
Calculates a one-way log-likelihood chi square for list of observed frequencies and returns
the result. If no expected frequencies are given, the total N is assumed to
be equally distributed across all groups.
Usage: llnchisquare(f_obs, f_exp=None, df=None) f_obs = list of observed cell freq.
Returns: chisquare-statistic, associated p-value, df, expected frequencies
"""
k = len(f_obs) # number of groups
if f_exp == None:
f_exp = [sum(f_obs)/float(k)] * len(f_obs) # create k bins with = freq.
chisq = 0
for i in range(len(f_obs)):
chisq += f_obs[i]*math.log(f_obs[i]/float(f_exp[i]))
chisq *= 2
if df == None:
df = k-1
return chisq, chisqprob(chisq, df), df, f_exp
def lks_2samp (data1,data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples. From
Numerical Recipes in C, page 493.
Usage: lks_2samp(data1,data2) data1&2 are lists of values for 2 conditions
Returns: KS D-value, associated p-value
"""
j1 = 0
j2 = 0
fn1 = 0.0
fn2 = 0.0
n1 = len(data1)
n2 = len(data2)
en1 = n1
en2 = n2
d = 0.0
data1.sort()
data2.sort()
while j1 < n1 and j2 < n2:
d1=data1[j1]
d2=data2[j2]
if d1 <= d2:
j1 = j1 + 1
fn1 = j1 / float(en1) # step the empirical CDF *after* consuming data1[j1]
if d2 <= d1:
j2 = j2 + 1
fn2 = j2 / float(en2)
dt = (fn2-fn1)
if math.fabs(dt) > math.fabs(d):
d = dt
try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = ksprob((en+0.12+0.11/en)*abs(d))
except Exception: # fall back if the asymptotic approximation fails
prob = 1.0
return d, prob
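# Illustrative usage (added sketch, not part of the original module). Note
# that lks_2samp sorts its arguments IN PLACE, so pass copies if the caller's
# ordering matters (sample1/sample2 are hypothetical lists of observations):
#     d, p = lks_2samp(list(sample1), list(sample2))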
def lmannwhitneyu(x,y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. NOTE: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U found in the tables. Equivalent to Kruskal-Wallis H with
just 2 groups.
Usage: lmannwhitneyu(x,y)
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
ranked = rankdata(x+y)
rankx = ranked[0:n1] # get the x-ranks
ranky = ranked[n1:] # the rest are y-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1,u2)
smallu = min(u1,u2)
T = tiecorrect(ranked) # correction factor for tied scores (no extra sqrt here)
if T == 0:
raise ValueError('All numbers are identical in lmannwhitneyu')
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
return smallu, 1.0 - zprob(z)
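# Illustrative usage (added sketch, not part of the original module):
#     u, p = lmannwhitneyu([1, 2, 3, 4], [5, 6, 7, 8])
# Completely non-overlapping samples give u == 0.0, the smallest possible U.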
def ltiecorrect(rankvals):
"""
Corrects for ties in Mann Whitney U and Kruskal Wallis H tests. See
Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
New York: McGraw-Hill. Code adapted from |Stat rankind.c code.
Usage: ltiecorrect(rankvals)
Returns: T correction factor for U or H
"""
ranks, posn = shellsort(rankvals) # 'ranks' avoids shadowing the built-in sorted
n = len(ranks)
T = 0.0
i = 0
while (i<n-1):
if ranks[i] == ranks[i+1]:
nties = 1
while (i<n-1) and (ranks[i] == ranks[i+1]):
nties = nties +1
i = i +1
T = T + nties**3 - nties
i = i+1
T = T / float(n**3-n)
return 1.0 - T
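# Illustrative check (added sketch, not part of the original module): in the
# rank vector [1, 2.5, 2.5, 4] the single pair of ties contributes
# 2**3 - 2 = 6, so ltiecorrect returns 1 - 6/(4**3 - 4) = 0.9.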
def lranksums(x,y):
"""
Calculates the rank sums statistic on the provided scores and
returns the result. Use only when the n in each condition is > 20 and you
have 2 independent samples of ranks.
Usage: lranksums(x,y)
Returns: a z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = x+y
ranked = rankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 -zprob(abs(z)))
return z, prob
def lwilcoxont(x,y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: lwilcoxont(x,y)
Returns: a t-statistic, two-tail probability estimate
"""
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxont. Aborting.')
d=[]
for i in range(len(x)):
diff = x[i] - y[i]
if diff != 0:
d.append(diff)
count = len(d)
absd = list(map(abs,d))
absranked = rankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
if d[i] < 0:
r_minus = r_minus + absranked[i]
else:
r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
mn = count * (count+1) * 0.25
se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
z = math.fabs(wt-mn) / se
prob = 2*(1.0 -zprob(abs(z)))
return wt, prob
def lkruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
and returns the result.
Usage: lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
"""
args = list(args)
n = [0]*len(args)
all = []
n = list(map(len,args))
for i in range(len(args)):
all = all + args[i]
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in lkruskalwallish')
h = h / float(T)
return h, chisqprob(h,df)
def lfriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for repeated
measures and returns the result, along with the associated probability
value. It assumes 3 or more repeated measures. With only 3 levels, a
minimum of 10 subjects is required; with 4 levels, roughly 5 subjects per
level (a rule of thumb only).
Usage: lfriedmanchisquare(*args)
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
data = _pstat.abut(*tuple(args))
for i in range(len(data)):
data[i] = rankdata(data[i])
ssbn = 0
for i in range(k):
ssbn = ssbn + sum(_pstat.colex(data,i))**2 # sum the RANKS within each condition
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, chisqprob(chisq,k-1)
####################################
#### PROBABILITY CALCULATIONS ####
####################################
def lchisqprob(chisq,df):
"""
Returns the (1-tailed) probability value associated with the provided
chi-square value and df. Adapted from chisq.c in Gary Perlman's |Stat.
Usage: lchisqprob(chisq,df)
"""
BIG = 20.0
def ex(x):
BIG = 20.0
if x < -BIG:
return 0.0
else:
return math.exp(x)
if chisq <=0 or df < 1:
return 1.0
a = 0.5 * chisq
if df%2 == 0:
even = 1
else:
even = 0
if df > 1:
y = ex(-a)
if even:
s = y
else:
s = 2.0 * zprob(-math.sqrt(chisq))
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = 1.0
else:
z = 0.5
if a > BIG:
if even:
e = 0.0
else:
e = math.log(math.sqrt(math.pi))
c = math.log(a)
while (z <= chisq):
e = math.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
return s
else:
if even:
e = 1.0
else:
e = 1.0 / math.sqrt(math.pi) / math.sqrt(a)
c = 0.0
while (z <= chisq):
e = e * (a/float(z))
c = c + e
z = z + 1.0
return (c*y+s)
else:
return s
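# Illustrative check (added sketch, not part of the original module): the
# familiar chi-square critical value of 3.841 with df=1 should give a right-
# tail probability near 0.05:
#     p = lchisqprob(3.841, 1)   # ~0.05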
def lerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional
error everywhere less than 1.2e-7. Adapted from Numerical Recipes.
Usage: lerfcc(x)
"""
z = abs(x)
t = 1.0 / (1.0+0.5*z)
ans = t * math.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
if x >= 0:
return ans
else:
return 2.0 - ans
def lzprob(z):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
for z>0, 1.0-zprob(z) = 1-tail probability
for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat.
Usage: lzprob(z)
"""
Z_MAX = 6.0 # maximum meaningful z-value
if z == 0.0:
x = 0.0
else:
y = 0.5 * math.fabs(z)
if y >= (Z_MAX*0.5):
x = 1.0
elif (y < 1.0):
w = y*y
x = ((((((((0.000124818987 * w
-0.001075204047) * w +0.005198775019) * w
-0.019198292004) * w +0.059054035642) * w
-0.151968751364) * w +0.319152932694) * w
-0.531923007300) * w +0.797884560593) * y * 2.0
else:
y = y - 2.0
x = (((((((((((((-0.000045255659 * y
+0.000152529290) * y -0.000019538132) * y
-0.000676904986) * y +0.001390604284) * y
-0.000794620820) * y -0.002034254874) * y
+0.006549791214) * y -0.010557625006) * y
+0.011630447319) * y -0.009279453341) * y
+0.005353579108) * y -0.002141268741) * y
+0.000535310849) * y +0.999936657524
if z > 0.0:
prob = ((x+1.0)*0.5)
else:
prob = ((1.0-x)*0.5)
return prob
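# Illustrative check (added sketch, not part of the original module): since
# lzprob returns the area to the LEFT of z,
#     lzprob(0.0)    # 0.5
#     lzprob(1.96)   # ~0.975, i.e. a two-tailed p of ~0.05 at z=1.96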
def lksprob(alam):
"""
Computes a Kolmogorov-Smirnov significance level. Adapted from
Numerical Recipes.
Usage: lksprob(alam)
"""
fac = 2.0
sum = 0.0
termbf = 0.0
a2 = -2.0*alam*alam
for j in range(1,201):
term = fac*math.exp(a2*j*j)
sum = sum + term
if math.fabs(term) <= (0.001*termbf) or math.fabs(term) < (1.0e-8*sum):
return sum
fac = -fac
termbf = math.fabs(term)
return 1.0 # Get here only if fails to converge; was 0.0!!
def lfprob (dfnum, dfden, F):
"""
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
p = betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
return p
def lbetacf(a,b,x):
"""
This function evaluates the continued fraction form of the incomplete
Beta function, betai. (Adapted from: Numerical Recipes in C.)
Usage: lbetacf(a,b,x)
"""
ITMAX = 200
EPS = 3.0e-7
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
if (abs(az-aold)<(EPS*abs(az))):
return az
raise ValueError('a or b too big, or ITMAX too small in Betacf.')
def lgammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
(Adapted from: Numerical Recipes in C.)
Usage: lgammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*math.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + math.log(2.50662827465*ser)
def lbetai(a,b,x):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented here,
using the betacf function. (Adapted from: Numerical Recipes in C.)
Usage: lbetai(a,b,x)
"""
if (x<0.0 or x>1.0):
raise ValueError('Bad x in lbetai')
if (x==0.0 or x==1.0):
bt = 0.0
else:
bt = math.exp(gammln(a+b)-gammln(a)-gammln(b)+a*math.log(x)+b*
math.log(1.0-x))
if (x<(a+1.0)/(a+b+2.0)):
return bt*betacf(a,b,x)/float(a)
else:
return 1.0-bt*betacf(b,a,1.0-x)/float(b)
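# Connection note (added, not part of the original module): the t-tests above
# obtain their two-tailed p-values from this function via
#     prob = betai(0.5*df, 0.5, df/(df + t*t))
# which is the standard incomplete-beta form of the t distribution's tails.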
####################################
####### ANOVA CALCULATIONS #######
####################################
def lF_oneway(lists):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: lF_oneway(lists) where lists is a list of lists, one list per
treatment group
Returns: F value, one-tailed p-value
"""
a = len(lists) # ANOVA on 'a' groups, each in its own list
alldata = []
means = list(map(mean, lists))
vars = list(map(var, lists))
ns = list(map(len,lists))
for i in range(len(lists)):
alldata = alldata + lists[i]
bign = len(alldata)
sstot = ss(alldata)-(square_of_sums(alldata)/float(bign))
ssbn = 0
for lst in lists: # 'lst' avoids shadowing the built-in list type
ssbn = ssbn + square_of_sums(lst)/float(len(lst))
ssbn = ssbn - (square_of_sums(alldata)/float(bign))
sswn = sstot-ssbn
dfbn = a-1
dfwn = bign - a
msb = ssbn/float(dfbn)
msw = sswn/float(dfwn)
f = msb/msw
prob = fprob(dfbn,dfwn,f)
return f, prob, ns, means, vars, ssbn, sswn, dfbn, dfwn
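# Illustrative usage (added sketch, not part of the original module). Note
# that lF_oneway takes a single list of lists, one inner list per group:
#     f, p = lF_oneway([[1, 2, 3], [2, 3, 4], [5, 6, 7]])[0:2]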
def lF_value (ER,EF,dfnum,dfden):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfR-dfF = degrees of freedom of the numerator
dfF = degrees of freedom associated with the denominator/Full model
Usage: lF_value(ER,EF,dfnum,dfden)
"""
return ((ER-EF)/float(dfnum) / (EF/float(dfden)))
####################################
######## SUPPORT FUNCTIONS #######
####################################
def writecc (listoflists,file,writetype='w',extra=2):
"""
Writes a list of lists to a file in columns, customized by the max
size of items within the columns (max size of items in col, +2 characters)
to specified file. File-overwrite is the default.
Usage: writecc (listoflists,file,writetype='w',extra=2)
Returns: None
"""
if type(listoflists[0]) not in [list,tuple]:
listoflists = [listoflists]
outfile = open(file,writetype)
rowstokill = []
list2print = copy.deepcopy(listoflists)
for i in range(len(listoflists)):
if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
rowstokill = rowstokill + [i]
rowstokill.reverse()
for row in rowstokill:
del list2print[row]
maxsize = [0]*len(list2print[0])
for col in range(len(list2print[0])):
items = _pstat.colex(list2print,col)
items = list(map(_pstat.makestr,items))
maxsize[col] = max(list(map(len,items))) + extra
for row in listoflists:
if row == ['\n'] or row == '\n':
outfile.write('\n')
elif row == ['dashes'] or row == 'dashes':
dashes = [0]*len(maxsize)
for j in range(len(maxsize)):
dashes[j] = '-'*(maxsize[j]-2)
outfile.write(_pstat.lineincustcols(dashes,maxsize))
else:
outfile.write(_pstat.lineincustcols(row,maxsize))
outfile.write('\n')
outfile.close()
return None
def lincr(l,cap): # to increment a list up to a max-list of 'cap'
"""
Simulate a counting system from an n-dimensional list.
Usage: lincr(l,cap) l=list to increment, cap=max values for each list pos'n
Returns: next set of values for list l, OR -1 (if overflow)
"""
l[0] = l[0] + 1 # e.g., [0,0,0] --> [2,4,3] (=cap)
for i in range(len(l)):
if l[i] > cap[i] and i < len(l)-1: # if carryover AND not done
l[i] = 0
l[i+1] = l[i+1] + 1
elif l[i] > cap[i] and i == len(l)-1: # overflow past last column, must be finished
l = -1
return l
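# Illustrative usage (added sketch, not part of the original module): lincr
# steps an odometer-style counter through all value combinations. With
# cap=[1, 2], repeated calls starting from [0, 0] yield
#     [1,0] [0,1] [1,1] [0,2] [1,2] and finally -1 (overflow).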
def lsum (inlist):
"""
Returns the sum of the items in the passed list.
Usage: lsum(inlist)
"""
s = 0
for item in inlist:
s = s + item
return s
def lcumsum (inlist):
"""
Returns a list consisting of the cumulative sum of the items in the
passed list.
Usage: lcumsum(inlist)
"""
newlist = copy.deepcopy(inlist)
for i in range(1,len(newlist)):
newlist[i] = newlist[i] + newlist[i-1]
return newlist
def lss(inlist):
"""
Squares each value in the passed list, adds up these squares and
returns the result.
Usage: lss(inlist)
"""
ss = 0
for item in inlist:
ss = ss + item*item
return ss
def lsummult (list1,list2):
"""
Multiplies elements in list1 and list2, element by element, and
returns the sum of all resulting multiplications. Must provide equal
length lists.
Usage: lsummult(list1,list2)
"""
if len(list1) != len(list2):
raise ValueError("Lists not equal length in summult.")
s = 0
for item1,item2 in _pstat.abut(list1,list2):
s = s + item1*item2
return s
def lsumdiffsquared(x,y):
"""
Takes pairwise differences of the values in lists x and y, squares
these differences, and returns the sum of these squares.
Usage: lsumdiffsquared(x,y)
Returns: sum[(x[i]-y[i])**2]
"""
sds = 0
for i in range(len(x)):
sds = sds + (x[i]-y[i])**2
return sds
def lsquare_of_sums(inlist):
"""
Adds the values in the passed list, squares the sum, and returns
the result.
Usage: lsquare_of_sums(inlist)
Returns: sum(inlist[i])**2
"""
s = sum(inlist)
return float(s)*s
def lshellsort(inlist):
"""
Shellsort algorithm. Sorts a 1D-list.
Usage: lshellsort(inlist)
Returns: sorted-inlist, sorting-index-vector (for original list)
"""
n = len(inlist)
svec = copy.deepcopy(inlist)
ivec = list(range(n))
gap = n // 2 # integer division needed
while gap >0:
for i in range(gap,n):
for j in range(i-gap,-1,-gap):
while j>=0 and svec[j]>svec[j+gap]:
temp = svec[j]
svec[j] = svec[j+gap]
svec[j+gap] = temp
itemp = ivec[j]
ivec[j] = ivec[j+gap]
ivec[j+gap] = itemp
gap = gap // 2 # integer division needed
# svec is now sorted inlist, and ivec has the order svec[i] = vec[ivec[i]]
return svec, ivec
def lrankdata(inlist):
"""
Ranks the data in inlist, dealing with ties appropriately. Assumes
a 1D inlist. Adapted from Gary Perlman's |Stat ranksort.
Usage: lrankdata(inlist)
Returns: a list of length equal to inlist, containing rank scores
"""
n = len(inlist)
svec, ivec = shellsort(inlist)
sumranks = 0
dupcount = 0
newlist = [0]*n
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1
if i==n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1,i+1):
newlist[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newlist
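# Illustrative usage (added sketch, not part of the original module): tied
# scores receive the average of the ranks they span, e.g.
#     lrankdata([3, 1, 4, 1, 5])   # -> [3.0, 1.5, 4.0, 1.5, 5.0]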
def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):
"""
Prints, or writes to a file, stats for two groups, using the name, n,
mean, sterr, min and max for each group, as well as the statistic name,
its value, and the associated p-value.
Usage: outputpairedstats(fname,writemode,
name1,n1,mean1,stderr1,min1,max1,
name2,n2,mean2,stderr2,min2,max2,
statname,stat,prob)
Returns: None
"""
suffix = '' # for *s after the p-value
try:
x = prob.shape
prob = prob[0]
except:
pass
if prob < 0.001: suffix = ' ***'
elif prob < 0.01: suffix = ' **'
elif prob < 0.05: suffix = ' *'
title = [['Name','N','Mean','SD','Min','Max']]
lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
[name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
if not isinstance(fname, str) or len(fname)==0:
print()
print(statname)
print()
_pstat.printcc(lofl)
print()
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except:
pass
print('Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix)
print()
else:
file = open(fname,writemode)
file.write('\n'+statname+'\n\n')
file.close()
writecc(lofl,fname,'a')
file = open(fname,'a')
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except:
pass
file.write(_pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n']))
file.close()
return None
def lfindwithin (data):
"""
Returns an integer representing a binary vector, where 1=within-
subject factor, 0=between. Input equals the entire data 2D list (i.e.,
column 0=random factor, column -1=measured values (those two are skipped).
Note: input data is in |Stat format ... a list of lists ("2D list") with
one row per measured value, first column=subject identifier, last column=
score, one in-between column per factor (these columns contain level
designations on each factor). See also stats.anova.__doc__.
Usage: lfindwithin(data) data in |Stat format
"""
numfact = len(data[0])-1
withinvec = 0
for col in range(1,numfact):
examplelevel = _pstat.unique(_pstat.colex(data,col))[0]
rows = _pstat.linexand(data,col,examplelevel) # get 1 level of this factor
factsubjs = _pstat.unique(_pstat.colex(rows,0))
allsubjs = _pstat.unique(_pstat.colex(data,0))
if len(factsubjs) == len(allsubjs): # fewer Ss than scores on this factor?
withinvec = withinvec + (1 << col)
return withinvec
#########################################################
#########################################################
####### DISPATCH LISTS AND TUPLES TO ABOVE FCNS #########
#########################################################
#########################################################
## CENTRAL TENDENCY:
geometricmean = Dispatch ( (lgeometricmean, (list, tuple)), )
harmonicmean = Dispatch ( (lharmonicmean, (list, tuple)), )
mean = Dispatch ( (lmean, (list, tuple)), )
median = Dispatch ( (lmedian, (list, tuple)), )
medianscore = Dispatch ( (lmedianscore, (list, tuple)), )
mode = Dispatch ( (lmode, (list, tuple)), )
## MOMENTS:
moment = Dispatch ( (lmoment, (list, tuple)), )
variation = Dispatch ( (lvariation, (list, tuple)), )
skew = Dispatch ( (lskew, (list, tuple)), )
kurtosis = Dispatch ( (lkurtosis, (list, tuple)), )
describe = Dispatch ( (ldescribe, (list, tuple)), )
## FREQUENCY STATISTICS:
itemfreq = Dispatch ( (litemfreq, (list, tuple)), )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (list, tuple)), )
percentileofscore = Dispatch ( (lpercentileofscore, (list, tuple)), )
histogram = Dispatch ( (lhistogram, (list, tuple)), )
cumfreq = Dispatch ( (lcumfreq, (list, tuple)), )
relfreq = Dispatch ( (lrelfreq, (list, tuple)), )
## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (list, tuple)), )
samplevar = Dispatch ( (lsamplevar, (list, tuple)), )
samplestdev = Dispatch ( (lsamplestdev, (list, tuple)), )
var = Dispatch ( (lvar, (list, tuple)), )
stdev = Dispatch ( (lstdev, (list, tuple)), )
sterr = Dispatch ( (lsterr, (list, tuple)), )
sem = Dispatch ( (lsem, (list, tuple)), )
z = Dispatch ( (lz, (list, tuple)), )
zs = Dispatch ( (lzs, (list, tuple)), )
## TRIMMING FCNS:
trimboth = Dispatch ( (ltrimboth, (list, tuple)), )
trim1 = Dispatch ( (ltrim1, (list, tuple)), )
## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (list, tuple)), )
pearsonr = Dispatch ( (lpearsonr, (list, tuple)), )
spearmanr = Dispatch ( (lspearmanr, (list, tuple)), )
pointbiserialr = Dispatch ( (lpointbiserialr, (list, tuple)), )
kendalltau = Dispatch ( (lkendalltau, (list, tuple)), )
linregress = Dispatch ( (llinregress, (list, tuple)), )
## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (list, tuple)), )
ttest_ind = Dispatch ( (lttest_ind, (list, tuple)), )
ttest_rel = Dispatch ( (lttest_rel, (list, tuple)), )
chisquare = Dispatch ( (lchisquare, (list, tuple)), )
ks_2samp = Dispatch ( (lks_2samp, (list, tuple)), )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (list, tuple)), )
ranksums = Dispatch ( (lranksums, (list, tuple)), )
tiecorrect = Dispatch ( (ltiecorrect, (list, tuple)), )
wilcoxont = Dispatch ( (lwilcoxont, (list, tuple)), )
kruskalwallish = Dispatch ( (lkruskalwallish, (list, tuple)), )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (list, tuple)), )
## PROBABILITY CALCS:
chisqprob = Dispatch ( (lchisqprob, (int, float)), )
zprob = Dispatch ( (lzprob, (int, float)), )
ksprob = Dispatch ( (lksprob, (int, float)), )
fprob = Dispatch ( (lfprob, (int, float)), )
betacf = Dispatch ( (lbetacf, (int, float)), )
betai = Dispatch ( (lbetai, (int, float)), )
erfcc = Dispatch ( (lerfcc, (int, float)), )
gammln = Dispatch ( (lgammln, (int, float)), )
## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (list, tuple)), )
F_value = Dispatch ( (lF_value, (list, tuple)), )
## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (list, tuple)), )
sum = Dispatch ( (lsum, (list, tuple)), )
cumsum = Dispatch ( (lcumsum, (list, tuple)), )
ss = Dispatch ( (lss, (list, tuple)), )
summult = Dispatch ( (lsummult, (list, tuple)), )
square_of_sums = Dispatch ( (lsquare_of_sums, (list, tuple)), )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (list, tuple)), )
shellsort = Dispatch ( (lshellsort, (list, tuple)), )
rankdata = Dispatch ( (lrankdata, (list, tuple)), )
findwithin = Dispatch ( (lfindwithin, (list, tuple)), )
#=============  THE ARRAY-VERSION OF THE STATS FUNCTIONS  ===============
#=============  THE ARRAY-VERSION OF THE STATS FUNCTIONS  ===============
#=============  THE ARRAY-VERSION OF THE STATS FUNCTIONS  ===============
try: # DEFINE THESE *ONLY* IF NUMPY IS AVAILABLE
import numpy as N
import numpy.linalg as LA
#####################################
######## ACENTRAL TENDENCY ########
#####################################
def ageometricmean (inarray,dimension=None,keepdims=0):
"""
Calculates the geometric mean of the values in the passed array.
That is: n-th root of (x1 * x2 * ... * xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: ageometricmean(inarray,dimension=None,keepdims=0)
Returns: geometric mean computed over dim(s) listed in dimension
"""
inarray = N.array(inarray,N.float_)
if dimension == None:
inarray = N.ravel(inarray)
size = len(inarray)
mult = N.power(inarray,1.0/size)
mult = N.multiply.reduce(mult)
elif type(dimension) in [int,float]:
size = inarray.shape[dimension]
mult = N.power(inarray,1.0/size)
mult = N.multiply.reduce(mult,dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
mult = N.reshape(mult,shp)
else: # must be a SEQUENCE of dims to average over
dims = list(dimension)
dims.sort()
dims.reverse()
size = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)
mult = N.power(inarray,1.0/size)
for dim in dims:
mult = N.multiply.reduce(mult,dim)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
mult = N.reshape(mult,shp)
return mult
def aharmonicmean (inarray,dimension=None,keepdims=0):
"""
Calculates the harmonic mean of the values in the passed array.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: aharmonicmean(inarray,dimension=None,keepdims=0)
Returns: harmonic mean computed over dim(s) in dimension
"""
inarray = inarray.astype(N.float_)
if dimension == None:
inarray = N.ravel(inarray)
size = len(inarray)
s = N.add.reduce(1.0 / inarray)
elif type(dimension) in [int,float]:
size = float(inarray.shape[dimension])
s = N.add.reduce(1.0/inarray, dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
s = N.reshape(s,shp)
else: # must be a SEQUENCE of dims to average over
dims = list(dimension)
dims.sort()
nondims = []
for i in range(len(inarray.shape)):
if i not in dims:
nondims.append(i)
tinarray = N.transpose(inarray,nondims+dims) # put keep-dims first
idx = [0] *len(nondims)
if idx == []:
size = len(N.ravel(inarray))
s = asum(1.0 / inarray)
if keepdims == 1:
s = N.reshape([s],N.ones(len(inarray.shape)))
else:
idx[0] = -1
loopcap = N.array(tinarray.shape[0:len(nondims)]) -1
s = N.zeros(loopcap+1,N.float_)
while incr(idx,loopcap) != -1:
s[idx] = asum(1.0/tinarray[idx])
size = N.multiply.reduce(N.take(inarray.shape,dims))
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
s = N.reshape(s,shp)
return size / s
def amean (inarray,dimension=None,keepdims=0):
"""
Calculates the arithmetic mean of the values in the passed array.
That is: 1/n * (x1 + x2 + ... + xn). Defaults to ALL values in the
passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: amean(inarray,dimension=None,keepdims=0)
Returns: arithmetic mean calculated over dim(s) in dimension
"""
if inarray.dtype in [N.int_, N.short,N.ubyte]:
inarray = inarray.astype(N.float_)
if dimension == None:
inarray = N.ravel(inarray)
sum = N.add.reduce(inarray)
denom = float(len(inarray))
elif type(dimension) in [int,float]:
sum = asum(inarray,dimension)
denom = float(inarray.shape[dimension])
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
sum = N.reshape(sum,shp)
else: # must be a TUPLE of dims to average over
dims = list(dimension)
dims.sort()
dims.reverse()
sum = inarray *1.0
for dim in dims:
sum = N.add.reduce(sum,dim)
denom = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
sum = N.reshape(sum,shp)
return sum/denom
def amedian (inarray,numbins=1000):
"""
Calculates the COMPUTED median value of an array of numbers, given the
number of bins to use for the histogram (more bins approaches finding the
precise median value of the array; default number of bins = 1000). From
G.W. Heiman's Basic Stats, or CRC Probability & Statistics.
NOTE: THIS ROUTINE ALWAYS uses the entire passed array (flattens it first).
Usage: amedian(inarray,numbins=1000)
Returns: median calculated over ALL values in inarray
"""
inarray = N.ravel(inarray)
(hist, smallest, binsize, extras) = ahistogram(inarray,numbins,[min(inarray),max(inarray)])
cumhist = N.cumsum(hist) # make cumulative histogram
otherbins = N.greater_equal(cumhist,len(inarray)/2.0)
otherbins = list(otherbins) # list of 0/1s, 1s start at median bin
cfbin = otherbins.index(1) # get 1st(!) index holding 50%ile score
LRL = smallest + binsize*cfbin # get lower read limit of that bin
cfbelow = N.add.reduce(hist[0:cfbin]) # cum. freq. below bin
freq = hist[cfbin] # frequency IN the 50%ile bin
median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN
return median
def amedianscore (inarray,dimension=None):
"""
Returns the 'middle' score of the passed array. If there is an even
number of scores, the mean of the 2 middle scores is returned. Can function
with 1D arrays, or on the FIRST dimension of 2D arrays (i.e., dimension can
be None, to pre-flatten the array, or else dimension must equal 0).
Usage: amedianscore(inarray,dimension=None)
Returns: 'middle' score of the array, or the mean of the 2 middle scores
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
inarray = N.sort(inarray,dimension)
if inarray.shape[dimension] % 2 == 0: # if even number of elements
indx = inarray.shape[dimension] // 2 # integer division
median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0
else:
indx = inarray.shape[dimension] // 2 # integer division
median = N.take(inarray,[indx],dimension)
if median.shape == (1,):
median = median[0]
return median
def amode(a, dimension=None):
"""
Returns an array of the modal (most common) score in the passed array.
If there is more than one such score, ONLY THE FIRST is returned.
The bin-count for the modal values is also returned. Operates on whole
array (dimension=None), or on a given dimension.
Usage: amode(a, dimension=None)
Returns: array of bin-counts for mode(s), array of corresponding modal values
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
scores = _pstat.aunique(N.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[dimension] = 1
oldmostfreq = N.zeros(testshape)
oldcounts = N.zeros(testshape)
for score in scores:
template = N.equal(a,score)
counts = asum(template,dimension,1)
mostfrequent = N.where(counts>oldcounts,score,oldmostfreq)
oldcounts = N.where(counts>oldcounts,counts,oldcounts)
oldmostfreq = mostfrequent
return oldcounts, mostfrequent
def atmean(a,limits=None,inclusive=(1,1)):
"""
Returns the arithmetic mean of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atmean(a,limits=None,inclusive=(1,1))
"""
if a.dtype in [N.int_, N.short,N.ubyte]:
a = a.astype(N.float_)
if limits == None:
return mean(a)
assert type(limits) in [list,tuple,N.ndarray], "Wrong type for limits in atmean"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] is not None and limits[1] is not None and \
(limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a))):
raise ValueError("No array values within given limits (atmean).")
if limits[0] is None and limits[1] is not None:
mask = upperfcn(a,limits[1])
elif limits[0] is not None and limits[1] is None:
mask = lowerfcn(a,limits[0])
elif limits[0] is not None and limits[1] is not None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
s = float(N.add.reduce(N.ravel(a*mask)))
n = float(N.add.reduce(N.ravel(mask)))
return s/n
def atvar(a,limits=None,inclusive=(1,1)):
"""
Returns the sample variance of values in an array, (i.e., using N-1),
ignoring values strictly outside the sequence passed to 'limits'.
Note: either limit in the sequence, or the value of limits itself,
can be set to None. The inclusive list/tuple determines whether the lower
and upper limiting bounds (respectively) are open/exclusive (0) or
closed/inclusive (1). ASSUMES A FLAT ARRAY (OR ELSE PREFLATTENS).
Usage: atvar(a,limits=None,inclusive=(1,1))
"""
a = a.astype(N.float_)
if limits is None or limits == [None,None]:
return avar(a)
assert type(limits) in [list,tuple,N.ndarray], "Wrong type for limits in atvar"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] is not None and limits[1] is not None and \
(limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a))):
raise ValueError("No array values within given limits (atvar).")
if limits[0] is None and limits[1] is not None:
mask = upperfcn(a,limits[1])
elif limits[0] is not None and limits[1] is None:
mask = lowerfcn(a,limits[0])
elif limits[0] is not None and limits[1] is not None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
a = N.compress(mask,a) # squish out excluded values
return avar(a)
def atmin(a,lowerlimit=None,dimension=None,inclusive=1):
"""
Returns the minimum value of a, along dimension, including only values greater
than (or equal to, if inclusive=1) lowerlimit. If the limit is set to None,
all values in the array are used.
Usage: atmin(a,lowerlimit=None,dimension=None,inclusive=1)
"""
if inclusive: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if dimension == None:
a = N.ravel(a)
dimension = 0
if lowerlimit == None:
lowerlimit = N.minimum.reduce(N.ravel(a))-11
biggest = N.maximum.reduce(N.ravel(a))
ta = N.where(lowerfcn(a,lowerlimit),a,biggest)
return N.minimum.reduce(ta,dimension)
def atmax(a,upperlimit,dimension=None,inclusive=1):
"""
Returns the maximum value of a, along dimension, including only values less
than (or equal to, if inclusive=1) upperlimit. If the limit is set to None,
a limit larger than the max value in the array is used.
Usage: atmax(a,upperlimit,dimension=None,inclusive=1)
"""
if inclusive: upperfcn = N.less_equal
else: upperfcn = N.less
if dimension == None:
a = N.ravel(a)
dimension = 0
if upperlimit == None:
upperlimit = N.maximum.reduce(N.ravel(a))+1
smallest = N.minimum.reduce(N.ravel(a))
ta = N.where(upperfcn(a,upperlimit),a,smallest)
return N.maximum.reduce(ta,dimension)
def atstdev(a,limits=None,inclusive=(1,1)):
"""
Returns the standard deviation of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atstdev(a,limits=None,inclusive=(1,1))
"""
return N.sqrt(tvar(a,limits,inclusive))
def atsem(a,limits=None,inclusive=(1,1)):
"""
Returns the standard error of the mean for the values in an array,
(i.e., using N for the denominator), ignoring values strictly outside
the sequence passed to 'limits'. Note: either limit in the sequence,
or the value of limits itself, can be set to None. The inclusive list/tuple
determines whether the lower and upper limiting bounds (respectively) are
open/exclusive (0) or closed/inclusive (1).
Usage: atsem(a,limits=None,inclusive=(1,1))
"""
sd = tstdev(a,limits,inclusive)
if limits == None or limits == [None,None]:
n = float(len(N.ravel(a)))
limits = [min(a)-1, max(a)+1]
assert type(limits) in [list,tuple,N.ndarray], "Wrong type for limits in atsem"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] is not None and limits[1] is not None and \
(limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a))):
raise ValueError("No array values within given limits (atsem).")
if limits[0] is None and limits[1] is not None:
mask = upperfcn(a,limits[1])
elif limits[0] is not None and limits[1] is None:
mask = lowerfcn(a,limits[0])
elif limits[0] is not None and limits[1] is not None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
term1 = N.add.reduce(N.ravel(a*a*mask))
n = float(N.add.reduce(N.ravel(mask)))
return sd/math.sqrt(n)
#####################################
############ AMOMENTS #############
#####################################
def amoment(a,moment=1,dimension=None):
"""
Calculates the nth moment about the mean for a sample (defaults to the
1st moment). Generally used to calculate coefficients of skewness and
kurtosis. Dimension can equal None (ravel array first), an integer
(the dimension over which to operate), or a sequence (operate over
multiple dimensions).
Usage: amoment(a,moment=1,dimension=None)
Returns: appropriate moment along given dimension
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
if moment == 1:
return 0.0
else:
mn = amean(a,dimension,1) # 1=keepdims
s = N.power((a-mn),moment)
return amean(s,dimension)
def avariation(a,dimension=None):
"""
Returns the coefficient of variation, as defined in CRC Standard
Probability and Statistics, p.6. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: avariation(a,dimension=None)
"""
return 100.0*asamplestdev(a,dimension)/amean(a,dimension)
def askew(a,dimension=None):
"""
Returns the skewness of a distribution (normal ==> 0.0; >0 means extra
weight in the right tail). Use askewtest() to see if it's close enough.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions).
Usage: askew(a, dimension=None)
Returns: skew of vals in a along dimension, returning ZERO where all vals equal
"""
denom = N.power(amoment(a,2,dimension),1.5)
zero = N.equal(denom,0)
if type(denom) == N.ndarray and asum(zero) != 0:
print("Number of zeros in askew: ",asum(zero))
denom = denom + zero # prevent divide-by-zero
return N.where(zero, 0, amoment(a,3,dimension)/denom)
def akurtosis(a,dimension=None):
"""
Returns the kurtosis of a distribution (normal ==> 3.0; >3 means
heavier in the tails, and usually more peaked). Use akurtosistest()
to see if it's close enough. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: akurtosis(a,dimension=None)
Returns: kurtosis of values in a along dimension, and ZERO where all vals equal
"""
denom = N.power(amoment(a,2,dimension),2)
zero = N.equal(denom,0)
if type(denom) == N.ndarray and asum(zero) != 0:
print("Number of zeros in akurtosis: ",asum(zero))
denom = denom + zero # prevent divide-by-zero
return N.where(zero,0,amoment(a,4,dimension)/denom)
def adescribe(inarray,dimension=None):
"""
Returns several descriptive statistics of the passed array. Dimension
can equal None (ravel array first), an integer (the dimension over
which to operate), or a sequence (operate over multiple dimensions).
Usage: adescribe(inarray,dimension=None)
Returns: n, (min,max), mean, standard deviation, skew, kurtosis
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
n = inarray.shape[dimension]
mm = (N.minimum.reduce(inarray),N.maximum.reduce(inarray))
m = amean(inarray,dimension)
sd = astdev(inarray,dimension)
skew = askew(inarray,dimension)
kurt = akurtosis(inarray,dimension)
return n, mm, m, sd, skew, kurt
#####################################
######## NORMALITY TESTS ##########
#####################################
def askewtest(a,dimension=None):
"""
Tests whether the skew is significantly different from a normal
distribution. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions).
Usage: askewtest(a,dimension=None)
Returns: z-score and 2-tail z-probability
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
b2 = askew(a,dimension)
n = float(a.shape[dimension])
y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )
beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )
W2 = -1 + N.sqrt(2*(beta2-1))
delta = 1/N.sqrt(N.log(N.sqrt(W2)))
alpha = N.sqrt(2/(W2-1))
y = N.where(y==0,1,y)
Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))
return Z, (1.0-zprob(Z))*2
def akurtosistest(a,dimension=None):
"""
Tests whether a dataset has normal kurtosis (i.e.,
kurtosis=3(n-1)/(n+1)) Valid only for n>20. Dimension can equal None
(ravel array first), an integer (the dimension over which to operate),
or a sequence (operate over multiple dimensions).
Usage: akurtosistest(a,dimension=None)
Returns: z-score and 2-tail z-probability, returns 0 for bad pixels
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
n = float(a.shape[dimension])
if n<20:
print("akurtosistest only valid for n>=20 ... continuing anyway, n=",n)
b2 = akurtosis(a,dimension)
E = 3.0*(n-1) /(n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
x = (b2-E)/N.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5))/
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 -2/(9.0*A)
denom = 1 +x*N.sqrt(2/(A-4.0))
denom = N.where(N.less(denom,0), 99, denom)
term2 = N.where(N.equal(denom,0), term1, N.power((1-2.0/A)/denom,1/3.0))
Z = ( term1 - term2 ) / N.sqrt(2/(9.0*A))
Z = N.where(N.equal(denom,99), 0, Z)
return Z, (1.0-zprob(Z))*2
def anormaltest(a,dimension=None):
"""
Tests whether skew and/OR kurtosis of dataset differs from normal
curve. Can operate over multiple dimensions. Dimension can equal
None (ravel array first), an integer (the dimension over which to
operate), or a sequence (operate over multiple dimensions).
Usage: anormaltest(a,dimension=None)
Returns: z-score and 2-tail probability
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
s,p = askewtest(a,dimension)
k,p = akurtosistest(a,dimension)
k2 = N.power(s,2) + N.power(k,2)
return k2, achisqprob(k2,2)
#####################################
###### AFREQUENCY FUNCTIONS #######
#####################################
def aitemfreq(a):
"""
Returns a 2D array of item frequencies. Column 1 contains item values,
column 2 contains their respective counts. Assumes a 1D array is passed.
@@@sorting OK?
Usage: aitemfreq(a)
Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
"""
scores = _pstat.aunique(a)
scores = N.sort(scores)
freq = N.zeros(len(scores))
for i in range(len(scores)):
freq[i] = N.add.reduce(N.equal(a,scores[i]))
return N.array(_pstat.aabut(scores, freq))
def ascoreatpercentile (inarray, percent):
"""
Usage: ascoreatpercentile(inarray,percent) 0<percent<100
Returns: score at given percentile, relative to inarray distribution
"""
percent = percent / 100.0
targetcf = percent*len(inarray)
h, lrl, binsize, extras = histogram(inarray)
cumhist = cumsum(h*1)
for i in range(len(cumhist)):
if cumhist[i] >= targetcf:
break
score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
return score
def apercentileofscore (inarray,score,histbins=10,defaultlimits=None):
"""
Note: result of this function depends on the values used to histogram
the data(!).
Usage: apercentileofscore(inarray,score,histbins=10,defaultlimits=None)
Returns: percentile-position of score (0-100) relative to inarray
"""
h, lrl, binsize, extras = histogram(inarray,histbins,defaultlimits)
cumhist = cumsum(h*1)
i = int((score - lrl)/float(binsize))
pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inarray)) * 100
return pct
def ahistogram (inarray,numbins=10,defaultlimits=None,printextras=1):
"""
Returns (i) an array of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. Defaultlimits
can be None (the routine picks bins spanning all the numbers in the
inarray) or a 2-sequence (lowerlimit, upperlimit). Returns all of the
following: array of bin values, lowerreallimit, binsize, extrapoints.
Usage: ahistogram(inarray,numbins=10,defaultlimits=None,printextras=1)
Returns: (array of bin counts, bin-minimum, min-width, #-points-outside-range)
"""
inarray = N.ravel(inarray) # flatten any >1D arrays
if (defaultlimits != None):
lowerreallimit = defaultlimits[0]
upperreallimit = defaultlimits[1]
binsize = (upperreallimit-lowerreallimit) / float(numbins)
else:
Min = N.minimum.reduce(inarray)
Max = N.maximum.reduce(inarray)
estbinwidth = float(Max - Min)/float(numbins) + 1e-6
binsize = (Max-Min+estbinwidth)/float(numbins)
lowerreallimit = Min - binsize/2.0 #lower real limit,1st bin
bins = N.zeros(numbins)
extrapoints = 0
for num in inarray:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit) / float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except: # point outside lower/upper limits
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print('\nPoints outside given histogram range =',extrapoints)
return (bins, lowerreallimit, binsize, extrapoints)
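# Illustrative usage (added sketch, not part of the original module):
#     bins, lrl, width, extras = ahistogram(N.array([1., 2., 2., 3.]), numbins=3)
# 'extras' counts points that fall outside explicitly supplied defaultlimits.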
def acumfreq(a,numbins=10,defaultreallimits=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
lower and upper limits on values to include.
Usage: acumfreq(a,numbins=10,defaultreallimits=None)
Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h,l,b,e = histogram(a,numbins,defaultreallimits)
cumhist = cumsum(h*1)
return cumhist,l,b,e
def arelfreq(a,numbins=10,defaultreallimits=None):
"""
Returns a relative frequency histogram, using the histogram function.
Defaultreallimits can be None (use all data), or a 2-sequence containing
lower and upper limits on values to include.
Usage: arelfreq(a,numbins=10,defaultreallimits=None)
Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
"""
h,l,b,e = histogram(a,numbins,defaultreallimits)
h = N.array(h/float(a.shape[0]))
return h,l,b,e
#####################################
###### AVARIABILITY FUNCTIONS #####
#####################################
def aobrientransform(*args):
"""
Computes a transform on input data (any number of columns). Used to
test for homogeneity of variance prior to running one-way stats. Each
array in *args is one level of a factor. If an F_oneway() run on the
transformed data and found significant, variances are unequal. From
Maxwell and Delaney, p.112.
Usage: aobrientransform(*args) *args = 1D arrays, one per level of factor
Returns: transformed data for use in an ANOVA
"""
TINY = 1e-10
k = len(args)
n = N.zeros(k,N.float_)
v = N.zeros(k,N.float_)
m = N.zeros(k,N.float_)
nargs = []
for i in range(k):
nargs.append(args[i].astype(N.float_))
n[i] = float(len(nargs[i]))
v[i] = var(nargs[i])
m[i] = mean(nargs[i])
for j in range(k):
for i in range(int(n[j])): # n[j] is stored as float; int() needed for range
t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
t2 = 0.5*v[j]*(n[j]-1.0)
t3 = (n[j]-1.0)*(n[j]-2.0)
nargs[j][i] = (t1-t2) / float(t3)
check = 1
for j in range(k):
if v[j] - mean(nargs[j]) > TINY:
check = 0
if check != 1:
raise ValueError('Lack of convergence in obrientransform.')
else:
return N.array(nargs)
def asamplevar (inarray,dimension=None,keepdims=0):
"""
Returns the sample variance of the values in the passed
array (i.e., using N). Dimension can equal None (ravel array first),
an integer (the dimension over which to operate), or a sequence
(operate over multiple dimensions). Set keepdims=1 to return an array
with the same number of dimensions as inarray.
Usage: asamplevar(inarray,dimension=None,keepdims=0)
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
if dimension == 1:
mn = amean(inarray,dimension)[:,N.newaxis]
else:
mn = amean(inarray,dimension,keepdims=1)
deviations = inarray - mn
if type(dimension) == list:
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
svar = ass(deviations,dimension,keepdims) / float(n)
return svar
def asamplestdev (inarray, dimension=None, keepdims=0):
"""
Returns the sample standard deviation of the values in the passed
array (i.e., using N). Dimension can equal None (ravel array first),
an integer (the dimension over which to operate), or a sequence
(operate over multiple dimensions). Set keepdims=1 to return an array
with the same number of dimensions as inarray.
Usage: asamplestdev(inarray,dimension=None,keepdims=0)
"""
return N.sqrt(asamplevar(inarray,dimension,keepdims))
def asignaltonoise(instack,dimension=0):
"""
Calculates signal-to-noise. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: asignaltonoise(instack,dimension=0):
Returns: array containing the value of (mean/stdev) along dimension,
or 0 when stdev=0
"""
m = mean(instack,dimension)
sd = stdev(instack,dimension)
return N.where(sd==0,0,m/sd)
def acov (x,y, dimension=None,keepdims=0):
"""
Returns the estimated covariance of the values in the passed
array (i.e., N-1). Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions). Set keepdims=1 to return an array with the
same number of dimensions as inarray.
Usage: acov(x,y,dimension=None,keepdims=0)
"""
if dimension == None:
x = N.ravel(x)
y = N.ravel(y)
dimension = 0
xmn = amean(x,dimension,1) # keepdims
xdeviations = x - xmn
ymn = amean(y,dimension,1) # keepdims
ydeviations = y - ymn
if type(dimension) == list:
n = 1
for d in dimension:
n = n*x.shape[d]
else:
n = x.shape[dimension]
covar = N.sum(xdeviations*ydeviations)/float(n-1)
return covar
def avar (inarray, dimension=None,keepdims=0):
"""
Returns the estimated population variance of the values in the passed
array (i.e., N-1). Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions). Set keepdims=1 to return an array with the
same number of dimensions as inarray.
Usage: avar(inarray,dimension=None,keepdims=0)
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
mn = amean(inarray,dimension,1)
deviations = inarray - mn
if type(dimension) == list:
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
var = ass(deviations,dimension,keepdims)/float(n-1)
return var
def astdev (inarray, dimension=None, keepdims=0):
"""
Returns the estimated population standard deviation of the values in
the passed array (i.e., N-1). Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions). Set keepdims=1 to return
an array with the same number of dimensions as inarray.
Usage: astdev(inarray,dimension=None,keepdims=0)
"""
return N.sqrt(avar(inarray,dimension,keepdims))
def asterr (inarray, dimension=None, keepdims=0):
"""
Returns the estimated population standard error of the values in the
passed array (i.e., N-1). Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions). Set keepdims=1 to return
an array with the same number of dimensions as inarray.
Usage: asterr(inarray,dimension=None,keepdims=0)
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
return astdev(inarray,dimension,keepdims) / float(N.sqrt(inarray.shape[dimension]))
def asem (inarray, dimension=None, keepdims=0):
"""
Returns the standard error of the mean (i.e., using N) of the values
in the passed array. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions). Set keepdims=1 to return an array with the
same number of dimensions as inarray.
Usage: asem(inarray,dimension=None, keepdims=0)
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
if type(dimension) == list:
n = 1
for d in dimension:
n = n*inarray.shape[d]
else:
n = inarray.shape[dimension]
s = asamplestdev(inarray,dimension,keepdims) / N.sqrt(n-1)
return s
def az (a, score):
"""
Returns the z-score of a given input score, given the array from which
that score came. Not appropriate for population calculations, nor for
arrays > 1D.
Usage: az(a, score)
"""
z = (score-amean(a)) / asamplestdev(a)
return z
def azs (a):
"""
Returns a 1D array of z-scores, one for each score in the passed array,
computed relative to the passed array.
Usage: azs(a)
"""
zscores = []
for item in a:
zscores.append(z(a,item))
return N.array(zscores)
def azmap (scores, compare, dimension=0):
"""
Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to
array passed to compare (e.g., [time,x,y]). Assumes collapsing over dim 0
of the compare array.
Usage: azmap(scores, compare, dimension=0)
"""
mns = amean(compare,dimension)
sstd = asamplestdev(compare,dimension)
return (scores - mns) / sstd
#####################################
####### ATRIMMING FUNCTIONS #######
#####################################
## deleted around() as it's in numpy now
def athreshold(a,threshmin=None,threshmax=None,newval=0):
"""
Like N.clip() except that values <threshmin or >threshmax are replaced
by newval instead of by threshmin/threshmax (respectively).
Usage: athreshold(a,threshmin=None,threshmax=None,newval=0)
Returns: a, with values <threshmin or >threshmax replaced with newval
"""
mask = N.zeros(a.shape)
if threshmin != None:
mask = mask + N.where(a<threshmin,1,0)
if threshmax != None:
mask = mask + N.where(a>threshmax,1,0)
mask = N.clip(mask,0,1)
return N.where(mask,newval,a)
def atrimboth (a,proportiontocut):
"""
Slices off the passed proportion of items from BOTH ends of the passed
array (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
'rightmost' 10% of scores). You must pre-sort the array if you want
"proper" trimming. Slices off LESS if proportion results in a
non-integer slice index (i.e., conservatively slices off
proportiontocut).
Usage: atrimboth (a,proportiontocut)
Returns: trimmed version of array a
"""
lowercut = int(proportiontocut*len(a))
uppercut = len(a) - lowercut
return a[lowercut:uppercut]
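# Illustrative usage (added sketch, not part of the original module): on a
# pre-sorted, hypothetical 10-element array a, proportiontocut=0.1 removes
# exactly one value from each end:
#     trimmed = atrimboth(N.sort(a), 0.1)   # len(trimmed) == 8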
def atrim1 (a,proportiontocut,tail='right'):
"""
Slices off the passed proportion of items from ONE end of the passed
array (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
10% of scores). Slices off LESS if proportion results in a non-integer
slice index (i.e., conservatively slices off proportiontocut).
Usage: atrim1(a,proportiontocut,tail='right') or set tail='left'
Returns: trimmed version of array a
"""
if tail.lower() == 'right':
lowercut = 0
uppercut = len(a) - int(proportiontocut*len(a))
elif tail.lower() == 'left':
lowercut = int(proportiontocut*len(a))
uppercut = len(a)
return a[lowercut:uppercut]
#####################################
##### ACORRELATION FUNCTIONS ######
#####################################
def acovariance(X):
"""
Computes the covariance matrix of a matrix X. Requires a 2D matrix input.
Usage: acovariance(X)
Returns: covariance matrix of X
"""
if len(X.shape) != 2:
raise TypeError("acovariance requires 2D matrices")
n = X.shape[0]
mX = amean(X,0)
return N.dot(N.transpose(X),X) / float(n) - N.multiply.outer(mX,mX)
def acorrelation(X):
"""
Computes the correlation matrix of a matrix X. Requires a 2D matrix input.
Usage: acorrelation(X)
Returns: correlation matrix of X
"""
C = acovariance(X)
V = N.diagonal(C)
return C / N.sqrt(N.multiply.outer(V,V))
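
# Illustrative sketch (assumes numpy as N): for a 2-column matrix whose second
# column is an exact multiple of the first, every pairwise correlation is 1.
#
#     >>> X = N.array([[1., 2.], [2., 4.], [3., 6.]])
#     >>> acorrelation(X)
#     array([[1., 1.],
#            [1., 1.]])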
def apaired(x,y):
"""
Interactively determines the type of data in x and y, and then runs the
appropriate statistic for paired group data.
Usage: apaired(x,y) x,y = the two arrays of values to be compared
Returns: appropriate statistic name, value, and probability
"""
samples = ''
while samples not in ['i','r','I','R','c','C']:
print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
samples = input()
if samples in ['i','I','r','R']:
print('\nComparing variances ...', end=' ')
        # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & Delaney, p.112
r = obrientransform(x,y)
f,p = F_oneway(_pstat.colex(r,0),_pstat.colex(r,1))
if p<0.05:
vartype='unequal, p='+str(round(p,4))
else:
vartype='equal'
print(vartype)
if samples in ['i','I']:
if vartype[0]=='e':
t,p = ttest_ind(x,y,None,0)
print('\nIndependent samples t-test: ', round(t,4),round(p,4))
else:
if len(x)>20 or len(y)>20:
z,p = ranksums(x,y)
print('\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4))
else:
u,p = mannwhitneyu(x,y)
print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4))
else: # RELATED SAMPLES
if vartype[0]=='e':
t,p = ttest_rel(x,y,0)
print('\nRelated samples t-test: ', round(t,4),round(p,4))
else:
t,p = ranksums(x,y)
print('\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4))
else: # CORRELATION ANALYSIS
corrtype = ''
while corrtype not in ['c','C','r','R','d','D']:
print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
corrtype = input()
if corrtype in ['c','C']:
m,b,r,p,see = linregress(x,y)
print('\nLinear regression for continuous variables ...')
lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
_pstat.printcc(lol)
elif corrtype in ['r','R']:
r,p = spearmanr(x,y)
print('\nCorrelation for ranked variables ...')
print("Spearman's r: ",round(r,4),round(p,4))
else: # DICHOTOMOUS
r,p = pointbiserialr(x,y)
print('\nAssuming x contains a dichotomous variable ...')
print('Point Biserial r: ',round(r,4),round(p,4))
print('\n\n')
return None
def dices(x,y):
"""
Calculates Dice's coefficient ... (2*number of common terms)/(number of terms in x +
number of terms in y). Returns a value between 0 (orthogonal) and 1.
Usage: dices(x,y)
"""
    x = set(x)                  # the old 'sets' module is gone in Python 3; use the built-in set
    y = set(y)
common = len(x.intersection(y))
total = float(len(x) + len(y))
return 2*common/total
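
# Illustrative sketch: two item lists sharing 2 of their 3 terms give
# Dice's coefficient 2*2/(3+3).
#
#     >>> dices(['a', 'b', 'c'], ['b', 'c', 'd'])
#     0.6666666666666666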
def icc(x,y=None,verbose=0):
"""
Calculates intraclass correlation coefficients using simple, Type I sums of squares.
If only one variable is passed, it is assumed to be an Nx2 matrix.
Usage: icc(x,y=None,verbose=0)
Returns: icc rho, prob ####PROB IS A GUESS BASED ON PEARSON
"""
TINY = 1.0e-20
    if y is not None:
all = N.concatenate([x,y],0)
else:
all = x+0
x = all[:,0]
y = all[:,1]
totalss = ass(all-mean(all))
pairmeans = (x+y)/2.
withinss = ass(x-pairmeans) + ass(y-pairmeans)
withindf = float(len(x))
betwdf = float(len(x)-1)
withinms = withinss / withindf
betweenms = (totalss-withinss) / betwdf
rho = (betweenms-withinms)/(withinms+betweenms)
t = rho*math.sqrt(betwdf/((1.0-rho+TINY)*(1.0+rho+TINY)))
prob = abetai(0.5*betwdf,0.5,betwdf/(betwdf+t*t),verbose)
return rho, prob
def alincc(x,y):
"""
Calculates Lin's concordance correlation coefficient.
Usage: alincc(x,y) where x, y are equal-length arrays
Returns: Lin's CC
"""
x = N.ravel(x)
y = N.ravel(y)
covar = acov(x,y)*(len(x)-1)/float(len(x)) # correct denom to n
xvar = avar(x)*(len(x)-1)/float(len(x)) # correct denom to n
yvar = avar(y)*(len(y)-1)/float(len(y)) # correct denom to n
lincc = (2 * covar) / ((xvar+yvar) +((amean(x)-amean(y))**2))
return lincc
def apearsonr(x,y,verbose=1):
"""
Calculates a Pearson correlation coefficient and returns p. Taken
from Heiman's Basic Statistics for the Behav. Sci (2nd), p.195.
Usage: apearsonr(x,y,verbose=1) where x,y are equal length arrays
Returns: Pearson's r, two-tailed p-value
"""
TINY = 1.0e-20
n = len(x)
xmean = amean(x)
ymean = amean(y)
r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
r = (r_num / r_den)
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = abetai(0.5*df,0.5,df/(df+t*t),verbose)
return r,prob
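
# Illustrative sketch (assumes numpy as N): a hand-checkable case where
# r_num = 5*53 - 15*15 = 40 and r_den = sqrt(50*50) = 50, so r = 0.8.
#
#     >>> x = N.array([1., 2., 3., 4., 5.])
#     >>> y = N.array([2., 1., 4., 3., 5.])
#     >>> r, p = apearsonr(x, y, verbose=0)
#     >>> round(r, 4)
#     0.8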
def aspearmanr(x,y):
"""
Calculates a Spearman rank-order correlation coefficient. Taken
from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.
Usage: aspearmanr(x,y) where x,y are equal-length arrays
Returns: Spearman's r, two-tailed p-value
"""
TINY = 1e-30
n = len(x)
rankx = rankdata(x)
ranky = rankdata(y)
dsq = N.add.reduce((rankx-ranky)**2)
rs = 1 - 6*dsq / float(n*(n**2-1))
t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
df = n-2
probrs = abetai(0.5*df,0.5,df/(df+t*t))
    # probability values for rs are from part 2 of the spearman function in
    # Numerical Recipes, p.510.  They are close to tabled values, but not exact. (?)
return rs, probrs
def apointbiserialr(x,y):
"""
Calculates a point-biserial correlation coefficient and the associated
probability value. Taken from Heiman's Basic Statistics for the Behav.
Sci (1st), p.194.
Usage: apointbiserialr(x,y) where x,y are equal length arrays
Returns: Point-biserial r, two-tailed p-value
"""
TINY = 1e-30
categories = _pstat.aunique(x)
data = _pstat.aabut(x,y)
if len(categories) != 2:
raise ValueError("Exactly 2 categories required (in x) for pointbiserialr().")
else: # there are 2 categories, continue
codemap = _pstat.aabut(categories,N.arange(2))
recoded = _pstat.arecode(data,codemap,0)
x = _pstat.alinexand(data,0,categories[0])
y = _pstat.alinexand(data,0,categories[1])
xmean = amean(_pstat.acolex(x,1))
ymean = amean(_pstat.acolex(y,1))
n = len(data)
adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
rpb = (ymean - xmean)/asamplestdev(_pstat.acolex(data,1))*adjust
df = n-2
t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
prob = abetai(0.5*df,0.5,df/(df+t*t))
return rpb, prob
def akendalltau(x,y):
"""
Calculates Kendall's tau ... correlation of ordinal data. Adapted
from function kendl1 in Numerical Recipes. Needs good test-cases.@@@
Usage: akendalltau(x,y)
Returns: Kendall's tau, two-tailed p-value
"""
n1 = 0
n2 = 0
iss = 0
for j in range(len(x)-1):
        for k in range(j+1,len(y)):   # start at j+1 so a pair is never compared with itself
a1 = x[j] - x[k]
a2 = y[j] - y[k]
aa = a1 * a2
if (aa): # neither array has a tie
n1 = n1 + 1
n2 = n2 + 1
if aa > 0:
iss = iss + 1
else:
iss = iss -1
else:
if (a1):
n1 = n1 + 1
else:
n2 = n2 + 1
tau = iss / math.sqrt(n1*n2)
svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
z = tau / math.sqrt(svar)
prob = erfcc(abs(z)/1.4142136)
return tau, prob
def alinregress(*args):
"""
Calculates a regression line on two arrays, x and y, corresponding to x,y
pairs. If a single 2D array is passed, alinregress finds dim with 2 levels
and splits data into x,y pairs along that dim.
Usage: alinregress(*args) args=2 equal-length arrays, or one 2D array
Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
"""
TINY = 1.0e-20
if len(args) == 1: # more than 1D array?
args = args[0]
if len(args) == 2:
x = args[0]
y = args[1]
else:
x = args[:,0]
y = args[:,1]
else:
x = args[0]
y = args[1]
n = len(x)
xmean = amean(x)
ymean = amean(y)
r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
r = r_num / r_den
z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = abetai(0.5*df,0.5,df/(df+t*t))
slope = r_num / (float(n)*ass(x) - asquare_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*asamplestdev(y)
return slope, intercept, r, prob, sterrest, n
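
# Illustrative sketch (assumes numpy as N): fitting points that lie exactly on
# y = 2x + 1 recovers the slope and intercept.
#
#     >>> out = alinregress(N.array([0., 1., 2., 3.]), N.array([1., 3., 5., 7.]))
#     >>> out[0], out[1]          # slope, intercept
#     (2.0, 1.0)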
def amasslinregress(*args):
"""
Calculates a regression line on one 1D array (x) and one N-D array (y).
Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
"""
TINY = 1.0e-20
if len(args) == 1: # more than 1D array?
args = args[0]
if len(args) == 2:
x = N.ravel(args[0])
y = args[1]
else:
x = N.ravel(args[:,0])
y = args[:,1]
else:
x = args[0]
y = args[1]
x = x.astype(N.float_)
y = y.astype(N.float_)
n = len(x)
xmean = amean(x)
ymean = amean(y,0)
    shp = N.ones(len(y.shape), N.int_)   # shape tuples must be integer-valued
    shp[0] = len(x)
    x.shape = shp
r_num = n*(N.add.reduce(x*y,0)) - N.add.reduce(x)*N.add.reduce(y,0)
r_den = N.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y,0)-asquare_of_sums(y,0)))
zerodivproblem = N.equal(r_den,0)
r_den = N.where(zerodivproblem,1,r_den) # avoid zero-division in 1st place
r = r_num / r_den # need to do this nicely for matrix division
r = N.where(zerodivproblem,0.0,r)
z = 0.5*N.log((1.0+r+TINY)/(1.0-r+TINY))
df = n-2
t = r*N.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = abetai(0.5*df,0.5,df/(df+t*t))
ss = float(n)*ass(x)-asquare_of_sums(x)
s_den = N.where(ss==0,1,ss) # avoid zero-division in 1st place
slope = r_num / s_den
intercept = ymean - slope*xmean
sterrest = N.sqrt(1-r*r)*asamplestdev(y,0)
return slope, intercept, r, prob, sterrest, n
#####################################
##### AINFERENTIAL STATISTICS #####
#####################################
def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
"""
Calculates the t-obtained for the single-sample t-test on ONE group
of scores a, given a population mean. If printit=1, results are printed
to the screen. If printit='filename', the results are output to 'filename'
using the given writemode (default=append). Returns t-value, and prob.
Usage: attest_1samp(a,popmean,name='Sample',printit=0,writemode='a')
Returns: t-value, two-tailed prob
"""
if type(a) != N.ndarray:
a = N.array(a)
x = amean(a)
v = avar(a)
n = len(a)
df = n-1
svar = ((n-1)*v) / float(df)
t = (x-popmean)/math.sqrt(svar*(1.0/n))
prob = abetai(0.5*df,0.5,df/(df+t*t))
if printit != 0:
statname = 'Single-sample T-test.'
outputpairedstats(printit,writemode,
'Population','--',popmean,0,0,0,
name,n,x,v,N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
statname,t,prob)
return t,prob
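
# Illustrative sketch (assumes numpy as N): when the sample mean equals the
# population mean, t is 0 and the two-tailed p is 1.
#
#     >>> t, p = attest_1samp(N.array([1., 2., 3., 4., 5.]), 3.0)
#     >>> round(t, 4), round(p, 4)
#     (0.0, 1.0)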
def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):
"""
Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores
a, and b. From Numerical Recipes, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Dimension
can equal None (ravel array first), or an integer (the dimension over
which to operate on a and b).
Usage: attest_ind(a,b,dimension=None,printit=0,
                  name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed p-value
"""
    if dimension is None:
a = N.ravel(a)
b = N.ravel(b)
dimension = 0
x1 = amean(a,dimension)
x2 = amean(b,dimension)
v1 = avar(a,dimension)
v2 = avar(b,dimension)
n1 = a.shape[dimension]
n2 = b.shape[dimension]
df = n1+n2-2
svar = ((n1-1)*v1+(n2-1)*v2) / float(df)
zerodivproblem = N.equal(svar,0)
svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place
t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!
t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0
probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
if type(t) == N.ndarray:
probs = N.reshape(probs,t.shape)
if probs.shape == (1,):
probs = probs[0]
if printit != 0:
if type(t) == N.ndarray:
t = t[0]
if type(probs) == N.ndarray:
probs = probs[0]
statname = 'Independent samples T-test.'
outputpairedstats(printit,writemode,
name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),
N.maximum.reduce(N.ravel(b)),
statname,t,probs)
return
return t, probs
def ap2t(pval,df):
"""
Tries to compute a t-value from a p-value (or pval array) and associated df.
SLOW for large numbers of elements(!) as it re-computes the p-values over
10 iterations of a bisection-style search, halving the step size each time.
Keeps the signs of the input array. Returns 1000 (or -1000) if t>100.
Usage: ap2t(pval,df)
Returns: an array of t-values with the shape of pval
"""
pval = N.array(pval)
signs = N.sign(pval)
pval = abs(pval)
t = N.ones(pval.shape,N.float_)*50
step = N.ones(pval.shape,N.float_)*25
print("Initial ap2t() prob calc")
prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
print('ap2t() iter: ', end=' ')
for i in range(10):
print(i,' ', end=' ')
t = N.where(pval<prob,t+step,t-step)
prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
step = step/2
print()
# since this is an ugly hack, we get ugly boundaries
t = N.where(t>99.9,1000,t) # hit upper-boundary
    t = t*signs                 # restore the sign of each input p-value
return t #, prob, pval
def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):
"""
Calculates the t-obtained T-test on TWO RELATED samples of scores, a
and b. From Numerical Recipes, p.483. If printit=1, results are
printed to the screen. If printit='filename', the results are output
to 'filename' using the given writemode (default=append). Dimension
can equal None (ravel array first), or an integer (the dimension over
which to operate on a and b).
Usage: attest_rel(a,b,dimension=None,printit=0,
name1='Samp1',name2='Samp2',writemode='a')
Returns: t-value, two-tailed p-value
"""
    if dimension is None:
a = N.ravel(a)
b = N.ravel(b)
dimension = 0
if len(a)!=len(b):
raise ValueError('Unequal length arrays.')
x1 = amean(a,dimension)
x2 = amean(b,dimension)
v1 = avar(a,dimension)
v2 = avar(b,dimension)
n = a.shape[dimension]
df = float(n-1)
d = (a-b).astype('d')
denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)
zerodivproblem = N.equal(denom,0)
denom = N.where(zerodivproblem,1,denom) # avoid zero-division in 1st place
t = N.add.reduce(d,dimension) / denom # N-D COMPUTATION HERE!!!!!!
t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0
probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
if type(t) == N.ndarray:
probs = N.reshape(probs,t.shape)
if probs.shape == (1,):
probs = probs[0]
if printit != 0:
statname = 'Related samples T-test.'
outputpairedstats(printit,writemode,
name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),
N.maximum.reduce(N.ravel(a)),
name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),
N.maximum.reduce(N.ravel(b)),
statname,t,probs)
return
return t, probs
def achisquare(f_obs,f_exp=None):
"""
Calculates a one-way chi square for array of observed frequencies and returns
the result. If no expected frequencies are given, the total N is assumed to
be equally distributed across all groups.
@@@NOT RIGHT??
Usage: achisquare(f_obs, f_exp=None) f_obs = array of observed cell freq.
Returns: chisquare-statistic, associated p-value
"""
k = len(f_obs)
    if f_exp is None:           # '== None' is ambiguous once f_exp is an ndarray
f_exp = N.array([sum(f_obs)/float(k)] * len(f_obs),N.float_)
f_exp = f_exp.astype(N.float_)
chisq = N.add.reduce((f_obs-f_exp)**2 / f_exp)
return chisq, achisqprob(chisq, k-1)
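
# Illustrative sketch (assumes numpy as N): with no expected frequencies given,
# the total N is split evenly, so equal observed counts give chi-square = 0.
#
#     >>> chisq, p = achisquare(N.array([10., 10., 10.]))
#     >>> chisq
#     0.0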
def aks_2samp (data1,data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples. Modified from
Numerical Recipes in C, page 493. Returns KS D-value, prob. Not ufunc-
like.
Usage: aks_2samp(data1,data2) where data1 and data2 are 1D arrays
Returns: KS D-value, p-value
"""
j1 = 0 # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE
j2 = 0 # N.zeros(data2.shape[1:])
fn1 = 0.0 # N.zeros(data1.shape[1:],N.float_)
fn2 = 0.0 # N.zeros(data2.shape[1:],N.float_)
n1 = data1.shape[0]
n2 = data2.shape[0]
en1 = n1*1
en2 = n2*1
d = N.zeros(data1.shape[1:],N.float_)
data1 = N.sort(data1,0)
data2 = N.sort(data2,0)
while j1 < n1 and j2 < n2:
d1=data1[j1]
d2=data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if abs(dt) > abs(d):
d = dt
# try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = aksprob((en+0.12+0.11/en)*N.fabs(d))
# except:
# prob = 1.0
return d, prob
def amannwhitneyu(x,y):
"""
Calculates a Mann-Whitney U statistic on the provided scores and
returns the result. Use only when the n in each condition is < 20 and
you have 2 independent samples of ranks. REMEMBER: Mann-Whitney U is
significant if the u-obtained is LESS THAN or equal to the critical
value of U.
Usage: amannwhitneyu(x,y) where x,y are arrays of values for 2 conditions
Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
"""
n1 = len(x)
n2 = len(y)
ranked = rankdata(N.concatenate((x,y)))
rankx = ranked[0:n1] # get the x-ranks
ranky = ranked[n1:] # the rest are y-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
bigu = max(u1,u2)
smallu = min(u1,u2)
T = math.sqrt(tiecorrect(ranked)) # correction factor for tied scores
if T == 0:
raise ValueError('All numbers are identical in amannwhitneyu')
sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
z = abs((bigu-n1*n2/2.0) / sd) # normal approximation for prob calc
    return smallu, 1.0 - zprob(z)   # z is a Python float here, so go through the zprob dispatcher
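
# Illustrative sketch (assumes numpy as N): completely separated groups give
# the smallest possible U of 0 for one of the two groups.
#
#     >>> u, p = amannwhitneyu(N.array([1., 2., 3.]), N.array([4., 5., 6.]))
#     >>> u
#     0.0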
def atiecorrect(rankvals):
"""
Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill. Code adapted from |Stat rankind.c
code.
Usage: atiecorrect(rankvals)
Returns: T correction factor for U or H
"""
    svec, posn = ashellsort(N.array(rankvals))   # renamed to avoid shadowing the builtin 'sorted'
    n = len(svec)
    T = 0.0
    i = 0
    while (i<n-1):
        if svec[i] == svec[i+1]:
            nties = 1
            while (i<n-1) and (svec[i] == svec[i+1]):
                nties = nties +1
                i = i +1
            T = T + nties**3 - nties
        i = i+1
T = T / float(n**3-n)
return 1.0 - T
def aranksums(x,y):
"""
Calculates the rank sums statistic on the provided scores and returns
the result.
Usage: aranksums(x,y) where x,y are arrays of values for 2 conditions
Returns: z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = N.concatenate((x,y))
ranked = arankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
    prob = 2*(1.0 - zprob(abs(z)))  # z is a Python float here; zprob dispatches on type
return z, prob
def awilcoxont(x,y):
"""
Calculates the Wilcoxon T-test for related samples and returns the
result. A non-parametric T-test.
Usage: awilcoxont(x,y) where x,y are equal-length arrays for 2 conditions
Returns: t-statistic, two-tailed p-value
"""
if len(x) != len(y):
raise ValueError('Unequal N in awilcoxont. Aborting.')
d = x-y
d = N.compress(N.not_equal(d,0),d) # Keep all non-zero differences
count = len(d)
absd = abs(d)
absranked = arankdata(absd)
r_plus = 0.0
r_minus = 0.0
for i in range(len(absd)):
if d[i] < 0:
r_minus = r_minus + absranked[i]
else:
r_plus = r_plus + absranked[i]
wt = min(r_plus, r_minus)
mn = count * (count+1) * 0.25
se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
    z = math.fabs(wt-mn) / se
    prob = 2*(1.0 - zprob(abs(z)))
return wt, prob
def akruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H and associated p-value for 3 or more
independent samples.
Usage: akruskalwallish(*args) args are separate arrays for 3+ conditions
Returns: H-statistic (corrected for ties), associated p-value
"""
    assert len(args) >= 3, "Need at least 3 groups in stats.akruskalwallish()"
args = list(args)
    n = list(map(len,args))
all = []
for i in range(len(args)):
all = all + args[i].tolist()
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in akruskalwallish')
h = h / float(T)
return h, chisqprob(h,df)
def afriedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for
repeated measures and returns the result, along with the associated
probability value. It assumes 3 or more repeated measures. With only 3
levels, a minimum of 10 subjects in the study is required; four levels
require at least 5 subjects per level(??).
Usage: afriedmanchisquare(*args) args are separate arrays for 2+ conditions
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
data = _pstat.aabut(*args)
data = data.astype(N.float_)
for i in range(len(data)):
data[i] = arankdata(data[i])
    ssbn = asum(asum(data,0)**2)   # sum the squared per-condition (column) rank sums
chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
return chisq, achisqprob(chisq,k-1)
#####################################
#### APROBABILITY CALCULATIONS ####
#####################################
def achisqprob(chisq,df):
"""
Returns the (1-tail) probability value associated with the provided chi-square
value and df. Heavily modified from chisq.c in Gary Perlman's |Stat. Can
handle multiple dimensions.
Usage: achisqprob(chisq,df) chisq=chisquare stat., df=degrees of freedom
"""
BIG = 200.0
def ex(x):
BIG = 200.0
exponents = N.where(N.less(x,-BIG),-BIG,x)
return N.exp(exponents)
if type(chisq) == N.ndarray:
arrayflag = 1
else:
arrayflag = 0
chisq = N.array([chisq])
if df < 1:
        return N.ones(chisq.shape,N.float_)
probs = N.zeros(chisq.shape,N.float_)
probs = N.where(N.less_equal(chisq,0),1.0,probs) # set prob=1 for chisq<0
a = 0.5 * chisq
if df > 1:
y = ex(-a)
if df%2 == 0:
even = 1
s = y*1
s2 = s*1
else:
even = 0
s = 2.0 * azprob(-N.sqrt(chisq))
s2 = s*1
if (df > 2):
chisq = 0.5 * (df - 1.0)
if even:
z = N.ones(probs.shape,N.float_)
else:
z = 0.5 *N.ones(probs.shape,N.float_)
if even:
e = N.zeros(probs.shape,N.float_)
else:
e = N.log(N.sqrt(N.pi)) *N.ones(probs.shape,N.float_)
c = N.log(a)
mask = N.zeros(probs.shape)
a_big = N.greater(a,BIG)
a_big_frozen = -1 *N.ones(probs.shape,N.float_)
totalelements = N.multiply.reduce(N.array(probs.shape))
while asum(mask)!=totalelements:
e = N.log(z) + e
s = s + ex(c*z-a-e)
z = z + 1.0
# print z, e, s
newmask = N.greater(z,chisq)
a_big_frozen = N.where(newmask*N.equal(mask,0)*a_big, s, a_big_frozen)
mask = N.clip(newmask+mask,0,1)
if even:
z = N.ones(probs.shape,N.float_)
e = N.ones(probs.shape,N.float_)
else:
z = 0.5 *N.ones(probs.shape,N.float_)
e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape,N.float_)
c = 0.0
mask = N.zeros(probs.shape)
a_notbig_frozen = -1 *N.ones(probs.shape,N.float_)
while asum(mask)!=totalelements:
e = e * (a/z.astype(N.float_))
c = c + e
z = z + 1.0
# print '#2', z, e, c, s, c*y+s2
newmask = N.greater(z,chisq)
a_notbig_frozen = N.where(newmask*N.equal(mask,0)*(1-a_big),
c*y+s2, a_notbig_frozen)
mask = N.clip(newmask+mask,0,1)
probs = N.where(N.equal(probs,1),1,
N.where(N.greater(a,BIG),a_big_frozen,a_notbig_frozen))
return probs
else:
return s
def aerfcc(x):
"""
Returns the complementary error function erfc(x) with fractional error
everywhere less than 1.2e-7. Adapted from Numerical Recipes. Can
handle multiple dimensions.
Usage: aerfcc(x)
"""
z = abs(x)
t = 1.0 / (1.0+0.5*z)
ans = t * N.exp(-z*z-1.26551223 + t*(1.00002368+t*(0.37409196+t*(0.09678418+t*(-0.18628806+t*(0.27886807+t*(-1.13520398+t*(1.48851587+t*(-0.82215223+t*0.17087277)))))))))
return N.where(N.greater_equal(x,0), ans, 2.0-ans)
def azprob(z):
"""
Returns the area under the normal curve 'to the left of' the given z value.
Thus,
for z<0, zprob(z) = 1-tail probability
for z>0, 1.0-zprob(z) = 1-tail probability
for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
Adapted from z.c in Gary Perlman's |Stat. Can handle multiple dimensions.
Usage: azprob(z) where z is a z-value
"""
def yfunc(y):
x = (((((((((((((-0.000045255659 * y
+0.000152529290) * y -0.000019538132) * y
-0.000676904986) * y +0.001390604284) * y
-0.000794620820) * y -0.002034254874) * y
+0.006549791214) * y -0.010557625006) * y
+0.011630447319) * y -0.009279453341) * y
+0.005353579108) * y -0.002141268741) * y
+0.000535310849) * y +0.999936657524
return x
def wfunc(w):
x = ((((((((0.000124818987 * w
-0.001075204047) * w +0.005198775019) * w
-0.019198292004) * w +0.059054035642) * w
-0.151968751364) * w +0.319152932694) * w
-0.531923007300) * w +0.797884560593) * N.sqrt(w) * 2.0
return x
Z_MAX = 6.0 # maximum meaningful z-value
x = N.zeros(z.shape,N.float_) # initialize
y = 0.5 * N.fabs(z)
x = N.where(N.less(y,1.0),wfunc(y*y),yfunc(y-2.0)) # get x's
x = N.where(N.greater(y,Z_MAX*0.5),1.0,x) # kill those with big Z
prob = N.where(N.greater(z,0),(x+1)*0.5,(1-x)*0.5)
return prob
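
# Illustrative sketch (assumes numpy as N; azprob expects an array, the scalar
# case being handled by the zprob dispatcher defined at the bottom of this
# module):
#
#     >>> azprob(N.array([0.0]))          # half the normal curve lies left of z=0
#     array([0.5])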
def aksprob(alam):
"""
Returns the probability value for a K-S statistic computed via ks_2samp.
Adapted from Numerical Recipes. Can handle multiple dimensions.
Usage: aksprob(alam)
"""
if type(alam) == N.ndarray:
frozen = -1 *N.ones(alam.shape,N.float64)
alam = alam.astype(N.float64)
arrayflag = 1
    else:
        frozen = N.array(-1.)
        alam = N.array(alam,N.float64)
        arrayflag = 0               # scalar input -> unwrap the result at the end
mask = N.zeros(alam.shape)
fac = 2.0 *N.ones(alam.shape,N.float_)
sum = N.zeros(alam.shape,N.float_)
termbf = N.zeros(alam.shape,N.float_)
a2 = N.array(-2.0*alam*alam,N.float64)
totalelements = N.multiply.reduce(N.array(mask.shape))
for j in range(1,201):
if asum(mask) == totalelements:
break
exponents = (a2*j*j)
overflowmask = N.less(exponents,-746)
frozen = N.where(overflowmask,0,frozen)
mask = mask+overflowmask
term = fac*N.exp(exponents)
sum = sum + term
newmask = N.where(N.less_equal(abs(term),(0.001*termbf)) +
N.less(abs(term),1.0e-8*sum), 1, 0)
frozen = N.where(newmask*N.equal(mask,0), sum, frozen)
mask = N.clip(mask+newmask,0,1)
fac = -fac
termbf = abs(term)
if arrayflag:
return N.where(N.equal(frozen,-1), 1.0, frozen) # 1.0 if doesn't converge
else:
return N.where(N.equal(frozen,-1), 1.0, frozen)[0] # 1.0 if doesn't converge
def afprob (dfnum, dfden, F):
"""
Returns the 1-tailed significance level (p-value) of an F statistic
given the degrees of freedom for the numerator (dfR-dfF) and the degrees
of freedom for the denominator (dfF). Can handle multiple dims for F.
Usage: afprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
"""
if type(F) == N.ndarray:
return abetai(0.5*dfden, 0.5*dfnum, dfden/(1.0*dfden+dfnum*F))
else:
return abetai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
def abetacf(a,b,x,verbose=1):
"""
Evaluates the continued fraction form of the incomplete Beta function,
betai. (Adapted from: Numerical Recipes in C.) Can handle multiple
dimensions for x.
Usage: abetacf(a,b,x,verbose=1)
"""
ITMAX = 200
EPS = 3.0e-7
arrayflag = 1
if type(x) == N.ndarray:
frozen = N.ones(x.shape,N.float_) *-1 #start out w/ -1s, should replace all
else:
arrayflag = 0
frozen = N.array([-1])
x = N.array([x])
mask = N.zeros(x.shape)
bm = az = am = 1.0
qab = a+b
qap = a+1.0
qam = a-1.0
bz = 1.0-qab*x/qap
for i in range(ITMAX+1):
if N.sum(N.ravel(N.equal(frozen,-1)))==0:
break
em = float(i+1)
tem = em + em
d = em*(b-em)*x/((qam+tem)*(a+tem))
ap = az + d*am
bp = bz+d*bm
d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
app = ap+d*az
bpp = bp+d*bz
aold = az*1
am = ap/bpp
bm = bp/bpp
az = app/bpp
bz = 1.0
newmask = N.less(abs(az-aold),EPS*abs(az))
frozen = N.where(newmask*N.equal(mask,0), az, frozen)
mask = N.clip(mask+newmask,0,1)
noconverge = asum(N.equal(frozen,-1))
if noconverge != 0 and verbose:
print('a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements')
if arrayflag:
return frozen
else:
return frozen[0]
def agammln(xx):
"""
Returns the gamma function of xx.
Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
Adapted from: Numerical Recipes in C. Can handle multiple dims ... but
probably doesn't normally have to.
Usage: agammln(xx)
"""
coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
0.120858003e-2, -0.536382e-5]
x = xx - 1.0
tmp = x + 5.5
tmp = tmp - (x+0.5)*N.log(tmp)
ser = 1.0
for j in range(len(coeff)):
x = x + 1
ser = ser + coeff[j]/x
return -tmp + N.log(2.50662827465*ser)
def abetai(a,b,x,verbose=1):
"""
Returns the incomplete beta function:
I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a. The continued fraction formulation is implemented
here, using the betacf function. (Adapted from: Numerical Recipes in
C.) Can handle multiple dimensions.
Usage: abetai(a,b,x,verbose=1)
"""
TINY = 1e-15
if type(a) == N.ndarray:
if asum(N.less(x,0)+N.greater(x,1)) != 0:
raise ValueError('Bad x in abetai')
x = N.where(N.equal(x,0),TINY,x)
x = N.where(N.equal(x,1.0),1-TINY,x)
bt = N.where(N.equal(x,0)+N.equal(x,1), 0, -1)
exponents = ( gammln(a+b)-gammln(a)-gammln(b)+a*N.log(x)+b*
N.log(1.0-x) )
    # exp() underflows for exponents below about -746; clip a little above that
exponents = N.where(N.less(exponents,-740),-740,exponents)
bt = N.exp(exponents)
if type(x) == N.ndarray:
ans = N.where(N.less(x,(a+1)/(a+b+2.0)),
bt*abetacf(a,b,x,verbose)/float(a),
1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b))
else:
if x<(a+1)/(a+b+2.0):
ans = bt*abetacf(a,b,x,verbose)/float(a)
else:
ans = 1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b)
return ans
#####################################
####### AANOVA CALCULATIONS #######
#####################################
import operator

LA = N.linalg     # Numeric's old 'LinearAlgebra' module is gone; numpy.linalg provides inv() and det()
def aglm(data,para):
"""
Calculates a linear model fit ... anova/ancova/lin-regress/t-test/etc. Taken
from:
Peterson et al. Statistical limitations in functional neuroimaging
I. Non-inferential methods and statistical models. Phil Trans Royal Soc
Lond B 354: 1239-1260.
Usage: aglm(data,para)
Returns: statistic, p-value ???
"""
    if len(para) != len(data):
        raise ValueError("data and para must be the same length in aglm")
n = len(para)
p = _pstat.aunique(para)
x = N.zeros((n,len(p))) # design matrix
for l in range(len(p)):
x[:,l] = N.equal(para,p[l])
b = N.dot(N.dot(LA.inv(N.dot(N.transpose(x),x)), # i.e., b=inv(X'X)X'Y
N.transpose(x)),
data)
diffs = (data - N.dot(x,b))
s_sq = 1./(n-len(p)) * N.dot(N.transpose(diffs), diffs)
if len(p) == 2: # ttest_ind
c = N.array([1,-1])
df = n-2
fact = asum(1.0/asum(x,0)) # i.e., 1/n1 + 1/n2 + 1/n3 ...
t = N.dot(c,b) / N.sqrt(s_sq*fact)
probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
return t, probs
def aF_oneway(*args):
"""
Performs a 1-way ANOVA, returning an F-value and probability given
any number of groups. From Heiman, pp.394-7.
Usage: aF_oneway (*args) where *args is 2 or more arrays, one per
treatment group
Returns: f-value, probability
"""
    na = len(args)                  # ANOVA on 'na' groups, each in its own array
alldata = N.concatenate(args)
bign = len(alldata)
sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
ssbn = 0
for a in args:
ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a))
ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
sswn = sstot-ssbn
dfbn = na-1
dfwn = bign - na
msb = ssbn/float(dfbn)
msw = sswn/float(dfwn)
f = msb/msw
prob = fprob(dfbn,dfwn,f)
return f, prob
def aF_value (ER,EF,dfR,dfF):
"""
Returns an F-statistic given the following:
ER = error associated with the null hypothesis (the Restricted model)
EF = error associated with the alternate hypothesis (the Full model)
dfR = degrees of freedom associated with the Restricted model
dfF = degrees of freedom associated with the Full model
"""
return ((ER-EF)/float(dfR-dfF) / (EF/float(dfF)))
def outputfstats(Enum, Eden, dfnum, dfden, f, prob):
Enum = round(Enum,3)
Eden = round(Eden,3)
    dfnum = round(dfnum,3)
dfden = round(dfden,3)
f = round(f,3)
prob = round(prob,3)
suffix = '' # for *s after the p-value
if prob < 0.001: suffix = ' ***'
elif prob < 0.01: suffix = ' **'
elif prob < 0.05: suffix = ' *'
title = [['EF/ER','DF','Mean Square','F-value','prob','']]
lofl = title+[[Enum, dfnum, round(Enum/float(dfnum),3), f, prob, suffix],
[Eden, dfden, round(Eden/float(dfden),3),'','','']]
_pstat.printcc(lofl)
return
def F_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfnum = degrees of freedom gained by the Full model (dfR - dfF)
        dfden = degrees of freedom associated with the Full model
where ER and EF are matrices from a multivariate F calculation.
"""
if type(ER) in [int, float]:
ER = N.array([[ER]])
if type(EF) in [int, float]:
EF = N.array([[EF]])
n_um = (LA.det(ER) - LA.det(EF)) / float(dfnum)
d_en = LA.det(EF) / float(dfden)
return n_um / d_en
#####################################
####### ASUPPORT FUNCTIONS ########
#####################################
def asign(a):
    """
Usage: asign(a)
Returns: array shape of a, with -1 where a<0, 0 where a==0, and +1 where a>0
"""
    a = N.asarray(a)
    return N.zeros(N.shape(a)) - N.less(a,0) + N.greater(a,0)
def asum (a, dimension=None,keepdims=0):
"""
An alternative to the Numeric.add.reduce function, which allows one to
(1) collapse over multiple dimensions at once, and/or (2) to retain
all dimensions in the original array (squashing each collapsed one down to size 1).
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). If keepdims=1, the resulting array will have as many
dimensions as the input array.
Usage: asum(a, dimension=None, keepdims=0)
Returns: array summed along 'dimension'(s), same _number_ of dims if keepdims=1
"""
if type(a) == N.ndarray and a.dtype in [N.int_, N.short, N.ubyte]:
a = a.astype(N.float_)
    if dimension is None:
s = N.sum(N.ravel(a))
elif type(dimension) in [int,float]:
s = N.add.reduce(a, dimension)
if keepdims == 1:
shp = list(a.shape)
shp[dimension] = 1
s = N.reshape(s,shp)
else: # must be a SEQUENCE of dims to sum over
dims = list(dimension)
dims.sort()
dims.reverse()
s = a *1.0
for dim in dims:
s = N.add.reduce(s,dim)
if keepdims == 1:
shp = list(a.shape)
for dim in dims:
shp[dim] = 1
s = N.reshape(s,shp)
return s
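
# Illustrative sketch (assumes numpy as N): summing down a dimension, with
# keepdims=1 preserving the number of dimensions of the input.
#
#     >>> a = N.array([[1, 2], [3, 4]])
#     >>> asum(a, 0)                      # integer input is upcast to float first
#     array([4., 6.])
#     >>> asum(a, 0, keepdims=1).shape
#     (1, 2)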
def acumsum (a,dimension=None):
"""
Returns an array consisting of the cumulative sum of the items in the
passed array. Dimension can equal None (ravel array first), an
integer (the dimension over which to operate), or a sequence (operate
over multiple dimensions, but this last one just barely makes sense).
Usage: acumsum(a,dimension=None)
"""
    if dimension is None:
a = N.ravel(a)
dimension = 0
if type(dimension) in [list, tuple, N.ndarray]:
dimension = list(dimension)
dimension.sort()
dimension.reverse()
for d in dimension:
a = N.add.accumulate(a,d)
return a
else:
return N.add.accumulate(a,dimension)
def ass(inarray, dimension=None, keepdims=0):
"""
Squares each value in the passed array, adds these squares & returns
the result. Unfortunate function name. :-) Defaults to ALL values in
the array. Dimension can equal None (ravel array first), an integer
(the dimension over which to operate), or a sequence (operate over
multiple dimensions). Set keepdims=1 to maintain the original number
of dimensions.
Usage: ass(inarray, dimension=None, keepdims=0)
Returns: sum-along-'dimension' for (inarray*inarray)
"""
    if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
return asum(inarray*inarray,dimension,keepdims)
def asummult (array1,array2,dimension=None,keepdims=0):
"""
Multiplies elements in array1 and array2, element by element, and
returns the sum (along 'dimension') of all resulting multiplications.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). A trivial function, but included for completeness.
Usage: asummult(array1,array2,dimension=None,keepdims=0)
"""
    if dimension is None:
array1 = N.ravel(array1)
array2 = N.ravel(array2)
dimension = 0
return asum(array1*array2,dimension,keepdims)
def asquare_of_sums(inarray, dimension=None, keepdims=0):
"""
Adds the values in the passed array, squares that sum, and returns the
result. Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions). If keepdims=1, the returned array will have the same
NUMBER of dimensions as the original.
Usage: asquare_of_sums(inarray, dimension=None, keepdims=0)
Returns: the square of the sum over dim(s) in dimension
"""
    if dimension is None:
inarray = N.ravel(inarray)
dimension = 0
s = asum(inarray,dimension,keepdims)
if type(s) == N.ndarray:
return s.astype(N.float_)*s
else:
return float(s)*s
def asumdiffsquared(a,b, dimension=None, keepdims=0):
"""
Takes pairwise differences of the values in arrays a and b, squares
these differences, and returns the sum of these squares. Dimension
can equal None (ravel array first), an integer (the dimension over
which to operate), or a sequence (operate over multiple dimensions).
keepdims=1 keeps the result with the same number of dimensions as a and b.
Usage: asumdiffsquared(a,b,dimension=None,keepdims=0)
Returns: sum[ravel(a-b)**2]
"""
    if dimension is None:
        a = N.ravel(a)
        b = N.ravel(b)
        dimension = 0
return asum((a-b)**2,dimension,keepdims)
def ashellsort(inarray):
"""
Shellsort algorithm. Sorts a 1D-array.
Usage: ashellsort(inarray)
Returns: sorted-inarray, sorting-index-vector (for original array)
"""
n = len(inarray)
svec = inarray *1.0
ivec = list(range(n))
    gap = n // 2                # integer division needed
while gap >0:
for i in range(gap,n):
for j in range(i-gap,-1,-gap):
while j>=0 and svec[j]>svec[j+gap]:
temp = svec[j]
svec[j] = svec[j+gap]
svec[j+gap] = temp
itemp = ivec[j]
ivec[j] = ivec[j+gap]
ivec[j+gap] = itemp
        gap = gap // 2          # integer division needed
# svec is now sorted input vector, ivec has the order svec[i] = vec[ivec[i]]
return svec, ivec
def arankdata(inarray):
"""
Ranks the data in inarray, dealing with ties appropriately. Assumes
a 1D inarray. Adapted from Gary Perlman's |Stat ranksort.
Usage: arankdata(inarray)
Returns: array of length equal to inarray, containing rank scores
"""
n = len(inarray)
svec, ivec = ashellsort(inarray)
sumranks = 0
dupcount = 0
newarray = N.zeros(n,N.float_)
for i in range(n):
sumranks = sumranks + i
dupcount = dupcount + 1
if i==n-1 or svec[i] != svec[i+1]:
averank = sumranks / float(dupcount) + 1
for j in range(i-dupcount+1,i+1):
newarray[ivec[j]] = averank
sumranks = 0
dupcount = 0
return newarray
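
# Illustrative sketch (assumes numpy as N): tied scores share the mean of the
# ranks they occupy (here the two 10s split ranks 1 and 2).
#
#     >>> arankdata(N.array([40., 10., 30., 10.]))
#     array([4. , 1.5, 3. , 1.5])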
def afindwithin(data):
"""
Returns a binary vector, 1=within-subject factor, 0=between. Input
equals the entire data array (i.e., column 1=random factor, last
column = measured values).
Usage: afindwithin(data) data in |Stat format
"""
numfact = len(data[0])-2
withinvec = [0]*numfact
for col in range(1,numfact+1):
rows = _pstat.linexand(data,col,_pstat.unique(_pstat.colex(data,1))[0]) # get 1 level of this factor
if len(_pstat.unique(_pstat.colex(rows,0))) < len(rows): # if fewer subjects than scores on this factor
withinvec[col-1] = 1
return withinvec
#########################################################
#########################################################
###### RE-DEFINE DISPATCHES TO INCLUDE ARRAYS #########
#########################################################
#########################################################
## CENTRAL TENDENCY:
geometricmean = Dispatch ( (lgeometricmean, (list, tuple)),
(ageometricmean, (N.ndarray,)) )
harmonicmean = Dispatch ( (lharmonicmean, (list, tuple)),
(aharmonicmean, (N.ndarray,)) )
mean = Dispatch ( (lmean, (list, tuple)),
(amean, (N.ndarray,)) )
median = Dispatch ( (lmedian, (list, tuple)),
(amedian, (N.ndarray,)) )
medianscore = Dispatch ( (lmedianscore, (list, tuple)),
(amedianscore, (N.ndarray,)) )
mode = Dispatch ( (lmode, (list, tuple)),
(amode, (N.ndarray,)) )
tmean = Dispatch ( (atmean, (N.ndarray,)) )
tvar = Dispatch ( (atvar, (N.ndarray,)) )
tstdev = Dispatch ( (atstdev, (N.ndarray,)) )
tsem = Dispatch ( (atsem, (N.ndarray,)) )
## VARIATION:
moment = Dispatch ( (lmoment, (list, tuple)),
(amoment, (N.ndarray,)) )
variation = Dispatch ( (lvariation, (list, tuple)),
(avariation, (N.ndarray,)) )
skew = Dispatch ( (lskew, (list, tuple)),
(askew, (N.ndarray,)) )
kurtosis = Dispatch ( (lkurtosis, (list, tuple)),
(akurtosis, (N.ndarray,)) )
describe = Dispatch ( (ldescribe, (list, tuple)),
(adescribe, (N.ndarray,)) )
## DISTRIBUTION TESTS
skewtest = Dispatch ( (askewtest, (list, tuple)),
(askewtest, (N.ndarray,)) )
kurtosistest = Dispatch ( (akurtosistest, (list, tuple)),
(akurtosistest, (N.ndarray,)) )
normaltest = Dispatch ( (anormaltest, (list, tuple)),
(anormaltest, (N.ndarray,)) )
## FREQUENCY STATS:
itemfreq = Dispatch ( (litemfreq, (list, tuple)),
(aitemfreq, (N.ndarray,)) )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (list, tuple)),
(ascoreatpercentile, (N.ndarray,)) )
percentileofscore = Dispatch ( (lpercentileofscore, (list, tuple)),
(apercentileofscore, (N.ndarray,)) )
histogram = Dispatch ( (lhistogram, (list, tuple)),
(ahistogram, (N.ndarray,)) )
cumfreq = Dispatch ( (lcumfreq, (list, tuple)),
(acumfreq, (N.ndarray,)) )
relfreq = Dispatch ( (lrelfreq, (list, tuple)),
(arelfreq, (N.ndarray,)) )
## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (list, tuple)),
(aobrientransform, (N.ndarray,)) )
samplevar = Dispatch ( (lsamplevar, (list, tuple)),
(asamplevar, (N.ndarray,)) )
samplestdev = Dispatch ( (lsamplestdev, (list, tuple)),
(asamplestdev, (N.ndarray,)) )
signaltonoise = Dispatch( (asignaltonoise, (N.ndarray,)),)
var = Dispatch ( (lvar, (list, tuple)),
(avar, (N.ndarray,)) )
stdev = Dispatch ( (lstdev, (list, tuple)),
(astdev, (N.ndarray,)) )
sterr = Dispatch ( (lsterr, (list, tuple)),
(asterr, (N.ndarray,)) )
sem = Dispatch ( (lsem, (list, tuple)),
(asem, (N.ndarray,)) )
z = Dispatch ( (lz, (list, tuple)),
(az, (N.ndarray,)) )
zs = Dispatch ( (lzs, (list, tuple)),
(azs, (N.ndarray,)) )
## TRIMMING FCNS:
threshold = Dispatch( (athreshold, (N.ndarray,)),)
trimboth = Dispatch ( (ltrimboth, (list, tuple)),
(atrimboth, (N.ndarray,)) )
trim1 = Dispatch ( (ltrim1, (list, tuple)),
(atrim1, (N.ndarray,)) )
## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (list, tuple)),
(apaired, (N.ndarray,)) )
lincc = Dispatch ( (llincc, (list, tuple)),
(alincc, (N.ndarray,)) )
pearsonr = Dispatch ( (lpearsonr, (list, tuple)),
(apearsonr, (N.ndarray,)) )
spearmanr = Dispatch ( (lspearmanr, (list, tuple)),
(aspearmanr, (N.ndarray,)) )
pointbiserialr = Dispatch ( (lpointbiserialr, (list, tuple)),
(apointbiserialr, (N.ndarray,)) )
kendalltau = Dispatch ( (lkendalltau, (list, tuple)),
(akendalltau, (N.ndarray,)) )
linregress = Dispatch ( (llinregress, (list, tuple)),
(alinregress, (N.ndarray,)) )
## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (list, tuple)),
(attest_1samp, (N.ndarray,)) )
ttest_ind = Dispatch ( (lttest_ind, (list, tuple)),
(attest_ind, (N.ndarray,)) )
ttest_rel = Dispatch ( (lttest_rel, (list, tuple)),
(attest_rel, (N.ndarray,)) )
chisquare = Dispatch ( (lchisquare, (list, tuple)),
(achisquare, (N.ndarray,)) )
ks_2samp = Dispatch ( (lks_2samp, (list, tuple)),
(aks_2samp, (N.ndarray,)) )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (list, tuple)),
(amannwhitneyu, (N.ndarray,)) )
tiecorrect = Dispatch ( (ltiecorrect, (list, tuple)),
(atiecorrect, (N.ndarray,)) )
ranksums = Dispatch ( (lranksums, (list, tuple)),
(aranksums, (N.ndarray,)) )
wilcoxont = Dispatch ( (lwilcoxont, (list, tuple)),
(awilcoxont, (N.ndarray,)) )
kruskalwallish = Dispatch ( (lkruskalwallish, (list, tuple)),
(akruskalwallish, (N.ndarray,)) )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (list, tuple)),
(afriedmanchisquare, (N.ndarray,)) )
## PROBABILITY CALCS:
chisqprob = Dispatch ( (lchisqprob, (int, float)),
(achisqprob, (N.ndarray,)) )
zprob = Dispatch ( (lzprob, (int, float)),
(azprob, (N.ndarray,)) )
ksprob = Dispatch ( (lksprob, (int, float)),
(aksprob, (N.ndarray,)) )
fprob = Dispatch ( (lfprob, (int, float)),
(afprob, (N.ndarray,)) )
betacf = Dispatch ( (lbetacf, (int, float)),
(abetacf, (N.ndarray,)) )
betai = Dispatch ( (lbetai, (int, float)),
(abetai, (N.ndarray,)) )
erfcc = Dispatch ( (lerfcc, (int, float)),
(aerfcc, (N.ndarray,)) )
gammln = Dispatch ( (lgammln, (int, float)),
(agammln, (N.ndarray,)) )
## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (list, tuple)),
(aF_oneway, (N.ndarray,)) )
F_value = Dispatch ( (lF_value, (list, tuple)),
(aF_value, (N.ndarray,)) )
## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (list, tuple, N.ndarray)), )
sum = Dispatch ( (lsum, (list, tuple)),
(asum, (N.ndarray,)) )
cumsum = Dispatch ( (lcumsum, (list, tuple)),
(acumsum, (N.ndarray,)) )
ss = Dispatch ( (lss, (list, tuple)),
(ass, (N.ndarray,)) )
summult = Dispatch ( (lsummult, (list, tuple)),
(asummult, (N.ndarray,)) )
square_of_sums = Dispatch ( (lsquare_of_sums, (list, tuple)),
(asquare_of_sums, (N.ndarray,)) )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (list, tuple)),
(asumdiffsquared, (N.ndarray,)) )
shellsort = Dispatch ( (lshellsort, (list, tuple)),
(ashellsort, (N.ndarray,)) )
rankdata = Dispatch ( (lrankdata, (list, tuple)),
(arankdata, (N.ndarray,)) )
findwithin = Dispatch ( (lfindwithin, (list, tuple)),
(afindwithin, (N.ndarray,)) )
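
# Illustrative sketch of the dispatch layer above (assumes numpy as N): each
# public name routes plain Python sequences to the list-based l-functions and
# numpy arrays to the array-based a-functions, so callers use one spelling.
#
#     >>> mean([1, 2, 3, 4])              # list -> lmean
#     2.5
#     >>> mean(N.array([1., 2., 3., 4.])) # ndarray -> amean
#     2.5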
###################### END OF NUMERIC FUNCTION BLOCK #####################
###################### END OF STATISTICAL FUNCTIONS ######################
except ImportError:
pass
| 34.59808 | 177 | 0.594641 |
be698a3cad3588bbfb4332971533e1b6be7968a3 | 61,804 | py | Python | src/azure-cli-core/azure/cli/core/_profile.py | francescanarea/azure-cli | b01fbe373a059aa6c746e6e067aac4ac77323211 | ["MIT"] | null | null | null | src/azure-cli-core/azure/cli/core/_profile.py | francescanarea/azure-cli | b01fbe373a059aa6c746e6e067aac4ac77323211 | ["MIT"] | 1 | 2021-06-02T02:50:00.000Z | 2021-06-02T02:50:00.000Z | src/azure-cli-core/azure/cli/core/_profile.py | francescanarea/azure-cli | b01fbe373a059aa6c746e6e067aac4ac77323211 | ["MIT"] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import collections
import errno
import json
import os
import os.path
import re
import string
from copy import deepcopy
from enum import Enum
from azure.cli.core._environment import get_config_dir
from azure.cli.core._session import ACCOUNT
from azure.cli.core.util import get_file_json, in_cloud_console, open_page_in_browser, can_launch_browser,\
is_windows, is_wsl
from azure.cli.core.cloud import get_active_cloud, set_cloud_subscription
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
# Names below are used by azure-xplat-cli to persist account information into
# ~/.azure/azureProfile.json or osx/keychainer or windows secure storage,
# which azure-cli will share.
# Please do not rename them unless you know what you are doing.
_IS_DEFAULT_SUBSCRIPTION = 'isDefault'
_SUBSCRIPTION_ID = 'id'
_SUBSCRIPTION_NAME = 'name'
# Tenant of the token which is used to list the subscription
_TENANT_ID = 'tenantId'
# Home tenant of the subscription, which maps to tenantId in 'Subscriptions - List REST API'
# https://docs.microsoft.com/en-us/rest/api/resources/subscriptions/list
_HOME_TENANT_ID = 'homeTenantId'
_MANAGED_BY_TENANTS = 'managedByTenants'
_USER_ENTITY = 'user'
_USER_NAME = 'name'
_CLOUD_SHELL_ID = 'cloudShellID'
_SUBSCRIPTIONS = 'subscriptions'
_INSTALLATION_ID = 'installationId'
_ENVIRONMENT_NAME = 'environmentName'
_STATE = 'state'
_USER_TYPE = 'type'
_USER = 'user'
_SERVICE_PRINCIPAL = 'servicePrincipal'
_SERVICE_PRINCIPAL_ID = 'servicePrincipalId'
_SERVICE_PRINCIPAL_TENANT = 'servicePrincipalTenant'
_SERVICE_PRINCIPAL_CERT_FILE = 'certificateFile'
_SERVICE_PRINCIPAL_CERT_THUMBPRINT = 'thumbprint'
_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH = 'useCertSNIssuerAuth'
_TOKEN_ENTRY_USER_ID = 'userId'
_TOKEN_ENTRY_TOKEN_TYPE = 'tokenType'
# This could mean either real access token, or client secret of a service principal
# This naming is no good, but can't change because xplat-cli does so.
_ACCESS_TOKEN = 'accessToken'
_REFRESH_TOKEN = 'refreshToken'
TOKEN_FIELDS_EXCLUDED_FROM_PERSISTENCE = ['familyName',
'givenName',
'isUserIdDisplayable',
'tenantId']
_CLIENT_ID = '04b07795-8ddb-461a-bbee-02f9e1bf7b46'
_COMMON_TENANT = 'common'
_TENANT_LEVEL_ACCOUNT_NAME = 'N/A(tenant level account)'
_SYSTEM_ASSIGNED_IDENTITY = 'systemAssignedIdentity'
_USER_ASSIGNED_IDENTITY = 'userAssignedIdentity'
_ASSIGNED_IDENTITY_INFO = 'assignedIdentityInfo'
_AZ_LOGIN_MESSAGE = "Please run 'az login' to setup account."
def load_subscriptions(cli_ctx, all_clouds=False, refresh=False):
profile = Profile(cli_ctx=cli_ctx)
if refresh:
profile.refresh_accounts()
subscriptions = profile.load_cached_subscriptions(all_clouds)
return subscriptions
def _get_authority_url(cli_ctx, tenant):
authority_url = cli_ctx.cloud.endpoints.active_directory
is_adfs = bool(re.match('.+(/adfs|/adfs/)$', authority_url, re.I))
if is_adfs:
authority_url = authority_url.rstrip('/') # workaround: ADAL is known to reject auth urls with trailing /
else:
authority_url = authority_url.rstrip('/') + '/' + (tenant or _COMMON_TENANT)
return authority_url, is_adfs
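
# For orientation (illustrative values only; the real endpoint comes from
# cli_ctx.cloud.endpoints.active_directory): on the public cloud,
#
#     _get_authority_url(cli_ctx, None)        # -> ('https://login.microsoftonline.com/common', False)
#     _get_authority_url(cli_ctx, 'contoso')   # -> ('https://login.microsoftonline.com/contoso', False)
#
# while an ADFS authority such as 'https://some-server/adfs' comes back with
# any trailing slash stripped, no tenant segment appended, and is_adfs=True.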
def _authentication_context_factory(cli_ctx, tenant, cache):
import adal
authority_url, is_adfs = _get_authority_url(cli_ctx, tenant)
return adal.AuthenticationContext(authority_url, cache=cache, api_version=None, validate_authority=(not is_adfs))
_AUTH_CTX_FACTORY = _authentication_context_factory
def _load_tokens_from_file(file_path):
if os.path.isfile(file_path):
try:
return get_file_json(file_path, throw_on_empty=False) or []
except (CLIError, ValueError) as ex:
raise CLIError("Failed to load token files. If you have a repro, please log an issue at "
"https://github.com/Azure/azure-cli/issues. At the same time, you can clean "
"up by running 'az account clear' and then 'az login'. (Inner Error: {})".format(ex))
return []
def _delete_file(file_path):
try:
os.remove(file_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def get_credential_types(cli_ctx):
class CredentialType(Enum): # pylint: disable=too-few-public-methods
cloud = get_active_cloud(cli_ctx)
management = cli_ctx.cloud.endpoints.management
rbac = cli_ctx.cloud.endpoints.active_directory_graph_resource_id
return CredentialType
def _get_cloud_console_token_endpoint():
return os.environ.get('MSI_ENDPOINT')
# pylint: disable=too-many-lines,too-many-instance-attributes
class Profile(object):
_global_creds_cache = None
def __init__(self, storage=None, auth_ctx_factory=None, use_global_creds_cache=True,
async_persist=True, cli_ctx=None):
from azure.cli.core import get_default_cli
self.cli_ctx = cli_ctx or get_default_cli()
self._storage = storage or ACCOUNT
self.auth_ctx_factory = auth_ctx_factory or _AUTH_CTX_FACTORY
if use_global_creds_cache:
# for perf, use global cache
if not Profile._global_creds_cache:
Profile._global_creds_cache = CredsCache(self.cli_ctx, self.auth_ctx_factory,
async_persist=async_persist)
self._creds_cache = Profile._global_creds_cache
else:
self._creds_cache = CredsCache(self.cli_ctx, self.auth_ctx_factory, async_persist=async_persist)
self._management_resource_uri = self.cli_ctx.cloud.endpoints.management
self._ad_resource_uri = self.cli_ctx.cloud.endpoints.active_directory_resource_id
self._ad = self.cli_ctx.cloud.endpoints.active_directory
self._msi_creds = None
def find_subscriptions_on_login(self,
interactive,
username,
password,
is_service_principal,
tenant,
use_device_code=False,
allow_no_subscriptions=False,
subscription_finder=None,
use_cert_sn_issuer=None):
from azure.cli.core._debug import allow_debug_adal_connection
allow_debug_adal_connection()
subscriptions = []
if not subscription_finder:
subscription_finder = SubscriptionFinder(self.cli_ctx,
self.auth_ctx_factory,
self._creds_cache.adal_token_cache)
if interactive:
if not use_device_code and (in_cloud_console() or not can_launch_browser()):
                logger.info('No GUI detected, so falling back to device code')
use_device_code = True
if not use_device_code:
try:
authority_url, _ = _get_authority_url(self.cli_ctx, tenant)
subscriptions = subscription_finder.find_through_authorization_code_flow(
tenant, self._ad_resource_uri, authority_url)
except RuntimeError:
use_device_code = True
logger.warning('Not able to launch a browser to log you in, falling back to device code...')
if use_device_code:
subscriptions = subscription_finder.find_through_interactive_flow(
tenant, self._ad_resource_uri)
else:
if is_service_principal:
if not tenant:
raise CLIError('Please supply tenant using "--tenant"')
sp_auth = ServicePrincipalAuth(password, use_cert_sn_issuer)
subscriptions = subscription_finder.find_from_service_principal_id(
username, sp_auth, tenant, self._ad_resource_uri)
else:
subscriptions = subscription_finder.find_from_user_account(
username, password, tenant, self._ad_resource_uri)
if not allow_no_subscriptions and not subscriptions:
if username:
msg = "No subscriptions found for {}.".format(username)
else:
# Don't show username if bare 'az login' is used
msg = "No subscriptions found."
raise CLIError(msg)
if is_service_principal:
self._creds_cache.save_service_principal_cred(sp_auth.get_entry_to_persist(username,
tenant))
if self._creds_cache.adal_token_cache.has_state_changed:
self._creds_cache.persist_cached_creds()
if allow_no_subscriptions:
t_list = [s.tenant_id for s in subscriptions]
bare_tenants = [t for t in subscription_finder.tenants if t not in t_list]
profile = Profile(cli_ctx=self.cli_ctx)
tenant_accounts = profile._build_tenant_level_accounts(bare_tenants) # pylint: disable=protected-access
subscriptions.extend(tenant_accounts)
if not subscriptions:
return []
consolidated = self._normalize_properties(subscription_finder.user_id, subscriptions,
is_service_principal, bool(use_cert_sn_issuer))
self._set_subscriptions(consolidated)
# use deepcopy as we don't want to persist these changes to file.
return deepcopy(consolidated)
def _normalize_properties(self, user, subscriptions, is_service_principal, cert_sn_issuer_auth=None,
user_assigned_identity_id=None):
import sys
consolidated = []
for s in subscriptions:
display_name = s.display_name
if display_name is None:
display_name = ''
try:
display_name.encode(sys.getdefaultencoding())
except (UnicodeEncodeError, UnicodeDecodeError): # mainly for Python 2.7 with ascii as the default encoding
display_name = re.sub(r'[^\x00-\x7f]', lambda x: '?', display_name)
subscription_dict = {
_SUBSCRIPTION_ID: s.id.rpartition('/')[2],
_SUBSCRIPTION_NAME: display_name,
_STATE: s.state.value,
_USER_ENTITY: {
_USER_NAME: user,
_USER_TYPE: _SERVICE_PRINCIPAL if is_service_principal else _USER
},
_IS_DEFAULT_SUBSCRIPTION: False,
_TENANT_ID: s.tenant_id,
_ENVIRONMENT_NAME: self.cli_ctx.cloud.name
}
# for Subscriptions - List REST API 2019-06-01's subscription account
if subscription_dict[_SUBSCRIPTION_NAME] != _TENANT_LEVEL_ACCOUNT_NAME:
if hasattr(s, 'home_tenant_id'):
subscription_dict[_HOME_TENANT_ID] = s.home_tenant_id
if hasattr(s, 'managed_by_tenants'):
subscription_dict[_MANAGED_BY_TENANTS] = [{_TENANT_ID: t.tenant_id} for t in s.managed_by_tenants]
consolidated.append(subscription_dict)
if cert_sn_issuer_auth:
consolidated[-1][_USER_ENTITY][_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH] = True
if user_assigned_identity_id:
consolidated[-1][_USER_ENTITY][_ASSIGNED_IDENTITY_INFO] = user_assigned_identity_id
return consolidated
def _build_tenant_level_accounts(self, tenants):
result = []
for t in tenants:
s = self._new_account()
s.id = '/subscriptions/' + t
s.subscription = t
s.tenant_id = t
s.display_name = _TENANT_LEVEL_ACCOUNT_NAME
result.append(s)
return result
def _new_account(self):
from azure.cli.core.profiles import ResourceType, get_sdk
SubscriptionType, StateType = get_sdk(self.cli_ctx, ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS, 'Subscription',
'SubscriptionState', mod='models')
s = SubscriptionType()
s.state = StateType.enabled
return s
def find_subscriptions_in_vm_with_msi(self, identity_id=None, allow_no_subscriptions=None):
# pylint: disable=too-many-statements
import jwt
from requests import HTTPError
from msrestazure.tools import is_valid_resource_id
from azure.cli.core.adal_authentication import MSIAuthenticationWrapper
resource = self.cli_ctx.cloud.endpoints.active_directory_resource_id
if identity_id:
if is_valid_resource_id(identity_id):
msi_creds = MSIAuthenticationWrapper(resource=resource, msi_res_id=identity_id)
identity_type = MsiAccountTypes.user_assigned_resource_id
else:
authenticated = False
try:
msi_creds = MSIAuthenticationWrapper(resource=resource, client_id=identity_id)
identity_type = MsiAccountTypes.user_assigned_client_id
authenticated = True
except HTTPError as ex:
                    if ex.response.reason == 'Bad Request' and ex.response.status_code == 400:
logger.info('Sniff: not an MSI client id')
else:
raise
if not authenticated:
try:
identity_type = MsiAccountTypes.user_assigned_object_id
msi_creds = MSIAuthenticationWrapper(resource=resource, object_id=identity_id)
authenticated = True
except HTTPError as ex:
                        if ex.response.reason == 'Bad Request' and ex.response.status_code == 400:
logger.info('Sniff: not an MSI object id')
else:
raise
if not authenticated:
raise CLIError('Failed to connect to MSI, check your managed service identity id.')
else:
identity_type = MsiAccountTypes.system_assigned
msi_creds = MSIAuthenticationWrapper(resource=resource)
token_entry = msi_creds.token
token = token_entry['access_token']
logger.info('MSI: token was retrieved. Now trying to initialize local accounts...')
decode = jwt.decode(token, verify=False, algorithms=['RS256'])
tenant = decode['tid']
subscription_finder = SubscriptionFinder(self.cli_ctx, self.auth_ctx_factory, None)
subscriptions = subscription_finder.find_from_raw_token(tenant, token)
base_name = ('{}-{}'.format(identity_type, identity_id) if identity_id else identity_type)
user = _USER_ASSIGNED_IDENTITY if identity_id else _SYSTEM_ASSIGNED_IDENTITY
if not subscriptions:
if allow_no_subscriptions:
subscriptions = self._build_tenant_level_accounts([tenant])
else:
raise CLIError('No access was configured for the VM, hence no subscriptions were found. '
"If this is expected, use '--allow-no-subscriptions' to have tenant level access.")
consolidated = self._normalize_properties(user, subscriptions, is_service_principal=True,
user_assigned_identity_id=base_name)
self._set_subscriptions(consolidated)
return deepcopy(consolidated)
def find_subscriptions_in_cloud_console(self):
import jwt
_, token, _ = self._get_token_from_cloud_shell(self.cli_ctx.cloud.endpoints.active_directory_resource_id)
logger.info('MSI: token was retrieved. Now trying to initialize local accounts...')
decode = jwt.decode(token, verify=False, algorithms=['RS256'])
tenant = decode['tid']
subscription_finder = SubscriptionFinder(self.cli_ctx, self.auth_ctx_factory, None)
subscriptions = subscription_finder.find_from_raw_token(tenant, token)
if not subscriptions:
raise CLIError('No subscriptions were found in the cloud shell')
user = decode.get('unique_name', 'N/A')
consolidated = self._normalize_properties(user, subscriptions, is_service_principal=False)
for s in consolidated:
s[_USER_ENTITY][_CLOUD_SHELL_ID] = True
self._set_subscriptions(consolidated)
return deepcopy(consolidated)
def _get_token_from_cloud_shell(self, resource): # pylint: disable=no-self-use
from azure.cli.core.adal_authentication import MSIAuthenticationWrapper
auth = MSIAuthenticationWrapper(resource=resource)
auth.set_token()
token_entry = auth.token
return (token_entry['token_type'], token_entry['access_token'], token_entry)
def _set_subscriptions(self, new_subscriptions, merge=True, secondary_key_name=None):
def _get_key_name(account, secondary_key_name):
return (account[_SUBSCRIPTION_ID] if secondary_key_name is None
else '{}-{}'.format(account[_SUBSCRIPTION_ID], account[secondary_key_name]))
def _match_account(account, subscription_id, secondary_key_name, secondary_key_val):
return (account[_SUBSCRIPTION_ID] == subscription_id and
(secondary_key_val is None or account[secondary_key_name] == secondary_key_val))
existing_ones = self.load_cached_subscriptions(all_clouds=True)
active_one = next((x for x in existing_ones if x.get(_IS_DEFAULT_SUBSCRIPTION)), None)
active_subscription_id = active_one[_SUBSCRIPTION_ID] if active_one else None
active_secondary_key_val = active_one[secondary_key_name] if (active_one and secondary_key_name) else None
active_cloud = self.cli_ctx.cloud
default_sub_id = None
# merge with existing ones
if merge:
dic = collections.OrderedDict((_get_key_name(x, secondary_key_name), x) for x in existing_ones)
else:
dic = collections.OrderedDict()
dic.update((_get_key_name(x, secondary_key_name), x) for x in new_subscriptions)
subscriptions = list(dic.values())
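        # keys collide when a fresh account matches a cached one, so new entries
        # overwrite stale ones while the original insertion order is preserved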
if subscriptions:
if active_one:
new_active_one = next(
(x for x in new_subscriptions if _match_account(x, active_subscription_id, secondary_key_name,
active_secondary_key_val)), None)
for s in subscriptions:
s[_IS_DEFAULT_SUBSCRIPTION] = False
if not new_active_one:
new_active_one = Profile._pick_working_subscription(new_subscriptions)
else:
new_active_one = Profile._pick_working_subscription(new_subscriptions)
new_active_one[_IS_DEFAULT_SUBSCRIPTION] = True
default_sub_id = new_active_one[_SUBSCRIPTION_ID]
set_cloud_subscription(self.cli_ctx, active_cloud.name, default_sub_id)
self._storage[_SUBSCRIPTIONS] = subscriptions
@staticmethod
def _pick_working_subscription(subscriptions):
from azure.mgmt.resource.subscriptions.models import SubscriptionState
s = next((x for x in subscriptions if x.get(_STATE) == SubscriptionState.enabled.value), None)
return s or subscriptions[0]
def is_tenant_level_account(self):
return self.get_subscription()[_SUBSCRIPTION_NAME] == _TENANT_LEVEL_ACCOUNT_NAME
def set_active_subscription(self, subscription): # take id or name
subscriptions = self.load_cached_subscriptions(all_clouds=True)
active_cloud = self.cli_ctx.cloud
subscription = subscription.lower()
result = [x for x in subscriptions
if subscription in [x[_SUBSCRIPTION_ID].lower(),
x[_SUBSCRIPTION_NAME].lower()] and
x[_ENVIRONMENT_NAME] == active_cloud.name]
if len(result) != 1:
raise CLIError("The subscription of '{}' {} in cloud '{}'.".format(
subscription, "doesn't exist" if not result else 'has more than one match', active_cloud.name))
for s in subscriptions:
s[_IS_DEFAULT_SUBSCRIPTION] = False
result[0][_IS_DEFAULT_SUBSCRIPTION] = True
set_cloud_subscription(self.cli_ctx, active_cloud.name, result[0][_SUBSCRIPTION_ID])
self._storage[_SUBSCRIPTIONS] = subscriptions
def logout(self, user_or_sp):
subscriptions = self.load_cached_subscriptions(all_clouds=True)
result = [x for x in subscriptions
if user_or_sp.lower() == x[_USER_ENTITY][_USER_NAME].lower()]
subscriptions = [x for x in subscriptions if x not in result]
self._storage[_SUBSCRIPTIONS] = subscriptions
self._creds_cache.remove_cached_creds(user_or_sp)
def logout_all(self):
self._storage[_SUBSCRIPTIONS] = []
self._creds_cache.remove_all_cached_creds()
def load_cached_subscriptions(self, all_clouds=False):
subscriptions = self._storage.get(_SUBSCRIPTIONS) or []
active_cloud = self.cli_ctx.cloud
cached_subscriptions = [sub for sub in subscriptions
if all_clouds or sub[_ENVIRONMENT_NAME] == active_cloud.name]
# use deepcopy as we don't want to persist these changes to file.
return deepcopy(cached_subscriptions)
def get_current_account_user(self):
try:
active_account = self.get_subscription()
except CLIError:
raise CLIError('There are no active accounts.')
return active_account[_USER_ENTITY][_USER_NAME]
def get_subscription(self, subscription=None): # take id or name
subscriptions = self.load_cached_subscriptions()
if not subscriptions:
raise CLIError(_AZ_LOGIN_MESSAGE)
result = [x for x in subscriptions if (
not subscription and x.get(_IS_DEFAULT_SUBSCRIPTION) or
subscription and subscription.lower() in [x[_SUBSCRIPTION_ID].lower(), x[
_SUBSCRIPTION_NAME].lower()])]
if not result and subscription:
raise CLIError("Subscription '{}' not found. "
"Check the spelling and casing and try again.".format(subscription))
if not result and not subscription:
raise CLIError("No subscription found. Run 'az account set' to select a subscription.")
if len(result) > 1:
raise CLIError("Multiple subscriptions with the name '{}' found. "
"Specify the subscription ID.".format(subscription))
return result[0]
def get_subscription_id(self, subscription=None): # take id or name
return self.get_subscription(subscription)[_SUBSCRIPTION_ID]
def get_access_token_for_resource(self, username, tenant, resource):
tenant = tenant or 'common'
_, access_token, _ = self._creds_cache.retrieve_token_for_user(
username, tenant, resource)
return access_token
@staticmethod
def _try_parse_msi_account_name(account):
msi_info, user = account[_USER_ENTITY].get(_ASSIGNED_IDENTITY_INFO), account[_USER_ENTITY].get(_USER_NAME)
if user in [_SYSTEM_ASSIGNED_IDENTITY, _USER_ASSIGNED_IDENTITY]:
if not msi_info:
msi_info = account[_SUBSCRIPTION_NAME] # fall back to old persisting way
parts = msi_info.split('-', 1)
if parts[0] in MsiAccountTypes.valid_msi_account_types():
return parts[0], (None if len(parts) <= 1 else parts[1])
return None, None
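    # e.g. an account persisted with base name 'MSIClient-<client-id>' parses to
    # ('MSIClient', '<client-id>'); a system-assigned account yields ('MSI', None)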
def get_login_credentials(self, resource=None, subscription_id=None, aux_subscriptions=None, aux_tenants=None):
if aux_tenants and aux_subscriptions:
raise CLIError("Please specify only one of aux_subscriptions and aux_tenants, not both")
account = self.get_subscription(subscription_id)
user_type = account[_USER_ENTITY][_USER_TYPE]
username_or_sp_id = account[_USER_ENTITY][_USER_NAME]
resource = resource or self.cli_ctx.cloud.endpoints.active_directory_resource_id
identity_type, identity_id = Profile._try_parse_msi_account_name(account)
external_tenants_info = []
if aux_tenants:
external_tenants_info = [tenant for tenant in aux_tenants if tenant != account[_TENANT_ID]]
if aux_subscriptions:
ext_subs = [aux_sub for aux_sub in aux_subscriptions if aux_sub != subscription_id]
for ext_sub in ext_subs:
sub = self.get_subscription(ext_sub)
if sub[_TENANT_ID] != account[_TENANT_ID]:
external_tenants_info.append(sub[_TENANT_ID])
if identity_type is None:
def _retrieve_token():
if in_cloud_console() and account[_USER_ENTITY].get(_CLOUD_SHELL_ID):
return self._get_token_from_cloud_shell(resource)
if user_type == _USER:
return self._creds_cache.retrieve_token_for_user(username_or_sp_id,
account[_TENANT_ID], resource)
use_cert_sn_issuer = account[_USER_ENTITY].get(_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH)
return self._creds_cache.retrieve_token_for_service_principal(username_or_sp_id, resource,
account[_TENANT_ID],
use_cert_sn_issuer)
def _retrieve_tokens_from_external_tenants():
external_tokens = []
for sub_tenant_id in external_tenants_info:
if user_type == _USER:
external_tokens.append(self._creds_cache.retrieve_token_for_user(
username_or_sp_id, sub_tenant_id, resource))
                else:
                    use_cert_sn_issuer = account[_USER_ENTITY].get(_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH)
                    external_tokens.append(self._creds_cache.retrieve_token_for_service_principal(
                        username_or_sp_id, resource, sub_tenant_id, use_cert_sn_issuer))
return external_tokens
from azure.cli.core.adal_authentication import AdalAuthentication
auth_object = AdalAuthentication(_retrieve_token,
_retrieve_tokens_from_external_tenants if external_tenants_info else None)
else:
if self._msi_creds is None:
self._msi_creds = MsiAccountTypes.msi_auth_factory(identity_type, identity_id, resource)
auth_object = self._msi_creds
return (auth_object,
str(account[_SUBSCRIPTION_ID]),
str(account[_TENANT_ID]))
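    # Minimal usage sketch (illustrative only; `profile` is an initialized
    # Profile and SomeMgmtClient is a hypothetical SDK client):
    #   creds, subscription_id, tenant_id = profile.get_login_credentials()
    #   client = SomeMgmtClient(creds, subscription_id)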
def get_msal_token(self, scopes, data):
"""
        This is added only for the vmssh feature.
        It is a temporary solution and will be deprecated once MSAL is fully adopted.
"""
account = self.get_subscription()
username = account[_USER_ENTITY][_USER_NAME]
tenant = account[_TENANT_ID] or 'common'
_, refresh_token, _, _ = self.get_refresh_token()
certificate = self._creds_cache.retrieve_msal_token(tenant, scopes, data, refresh_token)
return username, certificate
def get_refresh_token(self, resource=None,
subscription=None):
account = self.get_subscription(subscription)
user_type = account[_USER_ENTITY][_USER_TYPE]
username_or_sp_id = account[_USER_ENTITY][_USER_NAME]
resource = resource or self.cli_ctx.cloud.endpoints.active_directory_resource_id
if user_type == _USER:
_, _, token_entry = self._creds_cache.retrieve_token_for_user(
username_or_sp_id, account[_TENANT_ID], resource)
return None, token_entry.get(_REFRESH_TOKEN), token_entry[_ACCESS_TOKEN], str(account[_TENANT_ID])
sp_secret = self._creds_cache.retrieve_secret_of_service_principal(username_or_sp_id)
return username_or_sp_id, sp_secret, None, str(account[_TENANT_ID])
def get_raw_token(self, resource=None, subscription=None, tenant=None):
if subscription and tenant:
raise CLIError("Please specify only one of subscription and tenant, not both")
account = self.get_subscription(subscription)
user_type = account[_USER_ENTITY][_USER_TYPE]
username_or_sp_id = account[_USER_ENTITY][_USER_NAME]
resource = resource or self.cli_ctx.cloud.endpoints.active_directory_resource_id
identity_type, identity_id = Profile._try_parse_msi_account_name(account)
if identity_type:
# MSI
if tenant:
raise CLIError("Tenant shouldn't be specified for MSI account")
msi_creds = MsiAccountTypes.msi_auth_factory(identity_type, identity_id, resource)
msi_creds.set_token()
token_entry = msi_creds.token
creds = (token_entry['token_type'], token_entry['access_token'], token_entry)
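            # creds is a (token_type, access_token, whole_token_entry) triple,
            # the same shape the cache-backed branches below produce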
elif in_cloud_console() and account[_USER_ENTITY].get(_CLOUD_SHELL_ID):
# Cloud Shell
if tenant:
raise CLIError("Tenant shouldn't be specified for Cloud Shell account")
creds = self._get_token_from_cloud_shell(resource)
else:
tenant_dest = tenant if tenant else account[_TENANT_ID]
if user_type == _USER:
# User
creds = self._creds_cache.retrieve_token_for_user(username_or_sp_id,
tenant_dest, resource)
else:
# Service Principal
use_cert_sn_issuer = bool(account[_USER_ENTITY].get(_SERVICE_PRINCIPAL_CERT_SN_ISSUER_AUTH))
creds = self._creds_cache.retrieve_token_for_service_principal(username_or_sp_id,
resource,
tenant_dest,
use_cert_sn_issuer)
return (creds,
None if tenant else str(account[_SUBSCRIPTION_ID]),
str(tenant if tenant else account[_TENANT_ID]))
def refresh_accounts(self, subscription_finder=None):
subscriptions = self.load_cached_subscriptions()
to_refresh = subscriptions
from azure.cli.core._debug import allow_debug_adal_connection
allow_debug_adal_connection()
subscription_finder = subscription_finder or SubscriptionFinder(self.cli_ctx,
self.auth_ctx_factory,
self._creds_cache.adal_token_cache)
refreshed_list = set()
result = []
for s in to_refresh:
user_name = s[_USER_ENTITY][_USER_NAME]
if user_name in refreshed_list:
continue
refreshed_list.add(user_name)
is_service_principal = (s[_USER_ENTITY][_USER_TYPE] == _SERVICE_PRINCIPAL)
tenant = s[_TENANT_ID]
subscriptions = []
try:
if is_service_principal:
sp_auth = ServicePrincipalAuth(self._creds_cache.retrieve_secret_of_service_principal(user_name))
subscriptions = subscription_finder.find_from_service_principal_id(user_name, sp_auth, tenant,
self._ad_resource_uri)
else:
subscriptions = subscription_finder.find_from_user_account(user_name, None, None,
self._ad_resource_uri)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Refreshing for '%s' failed with an error '%s'. The existing accounts were not "
"modified. You can run 'az login' later to explicitly refresh them", user_name, ex)
result += deepcopy([r for r in to_refresh if r[_USER_ENTITY][_USER_NAME] == user_name])
continue
if not subscriptions:
if s[_SUBSCRIPTION_NAME] == _TENANT_LEVEL_ACCOUNT_NAME:
subscriptions = self._build_tenant_level_accounts([s[_TENANT_ID]])
if not subscriptions:
continue
consolidated = self._normalize_properties(subscription_finder.user_id,
subscriptions,
is_service_principal)
result += consolidated
if self._creds_cache.adal_token_cache.has_state_changed:
self._creds_cache.persist_cached_creds()
self._set_subscriptions(result, merge=False)
def get_sp_auth_info(self, subscription_id=None, name=None, password=None, cert_file=None):
from collections import OrderedDict
account = self.get_subscription(subscription_id)
        # was the credential created through a command like 'create-for-rbac'?
result = OrderedDict()
if name and (password or cert_file):
result['clientId'] = name
if password:
result['clientSecret'] = password
else:
result['clientCertificate'] = cert_file
result['subscriptionId'] = subscription_id or account[_SUBSCRIPTION_ID]
        else: # the user has logged in through the CLI
user_type = account[_USER_ENTITY].get(_USER_TYPE)
if user_type == _SERVICE_PRINCIPAL:
result['clientId'] = account[_USER_ENTITY][_USER_NAME]
sp_auth = ServicePrincipalAuth(self._creds_cache.retrieve_secret_of_service_principal(
account[_USER_ENTITY][_USER_NAME]))
secret = getattr(sp_auth, 'secret', None)
if secret:
result['clientSecret'] = secret
else:
# we can output 'clientCertificateThumbprint' if asked
result['clientCertificate'] = sp_auth.certificate_file
result['subscriptionId'] = account[_SUBSCRIPTION_ID]
else:
raise CLIError('SDK Auth file is only applicable when authenticated using a service principal')
result[_TENANT_ID] = account[_TENANT_ID]
endpoint_mappings = OrderedDict() # use OrderedDict to control the output sequence
endpoint_mappings['active_directory'] = 'activeDirectoryEndpointUrl'
endpoint_mappings['resource_manager'] = 'resourceManagerEndpointUrl'
endpoint_mappings['active_directory_graph_resource_id'] = 'activeDirectoryGraphResourceId'
endpoint_mappings['sql_management'] = 'sqlManagementEndpointUrl'
endpoint_mappings['gallery'] = 'galleryEndpointUrl'
endpoint_mappings['management'] = 'managementEndpointUrl'
for e in endpoint_mappings:
result[endpoint_mappings[e]] = getattr(get_active_cloud(self.cli_ctx).endpoints, e)
return result
def get_installation_id(self):
installation_id = self._storage.get(_INSTALLATION_ID)
if not installation_id:
import uuid
installation_id = str(uuid.uuid1())
self._storage[_INSTALLATION_ID] = installation_id
return installation_id
class MsiAccountTypes(object):
# pylint: disable=no-method-argument,no-self-argument
system_assigned = 'MSI'
user_assigned_client_id = 'MSIClient'
user_assigned_object_id = 'MSIObject'
user_assigned_resource_id = 'MSIResource'
@staticmethod
def valid_msi_account_types():
return [MsiAccountTypes.system_assigned, MsiAccountTypes.user_assigned_client_id,
MsiAccountTypes.user_assigned_object_id, MsiAccountTypes.user_assigned_resource_id]
@staticmethod
def msi_auth_factory(cli_account_name, identity, resource):
from azure.cli.core.adal_authentication import MSIAuthenticationWrapper
if cli_account_name == MsiAccountTypes.system_assigned:
return MSIAuthenticationWrapper(resource=resource)
if cli_account_name == MsiAccountTypes.user_assigned_client_id:
return MSIAuthenticationWrapper(resource=resource, client_id=identity)
if cli_account_name == MsiAccountTypes.user_assigned_object_id:
return MSIAuthenticationWrapper(resource=resource, object_id=identity)
if cli_account_name == MsiAccountTypes.user_assigned_resource_id:
return MSIAuthenticationWrapper(resource=resource, msi_res_id=identity)
raise ValueError("unrecognized msi account name '{}'".format(cli_account_name))
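    # e.g. msi_auth_factory(MsiAccountTypes.user_assigned_client_id, client_id,
    # resource) returns an MSIAuthenticationWrapper bound to that identity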
class SubscriptionFinder(object):
'''finds all subscriptions for a user or service principal'''
def __init__(self, cli_ctx, auth_context_factory, adal_token_cache, arm_client_factory=None):
self._adal_token_cache = adal_token_cache
self._auth_context_factory = auth_context_factory
        self.user_id = None # figured out after the user logs in
self.cli_ctx = cli_ctx
def create_arm_client_factory(credentials):
if arm_client_factory:
return arm_client_factory(credentials)
from azure.cli.core.profiles._shared import get_client_class
from azure.cli.core.profiles import ResourceType, get_api_version
from azure.cli.core.commands.client_factory import configure_common_settings
client_type = get_client_class(ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS)
api_version = get_api_version(cli_ctx, ResourceType.MGMT_RESOURCE_SUBSCRIPTIONS)
client = client_type(credentials, api_version=api_version,
base_url=self.cli_ctx.cloud.endpoints.resource_manager)
configure_common_settings(cli_ctx, client)
return client
self._arm_client_factory = create_arm_client_factory
self.tenants = []
def find_from_user_account(self, username, password, tenant, resource):
context = self._create_auth_context(tenant)
if password:
token_entry = context.acquire_token_with_username_password(resource, username, password, _CLIENT_ID)
        else: # when refreshing accounts, leverage locally cached tokens
token_entry = context.acquire_token(resource, username, _CLIENT_ID)
if not token_entry:
return []
self.user_id = token_entry[_TOKEN_ENTRY_USER_ID]
if tenant is None:
result = self._find_using_common_tenant(token_entry[_ACCESS_TOKEN], resource)
else:
result = self._find_using_specific_tenant(tenant, token_entry[_ACCESS_TOKEN])
return result
def find_through_authorization_code_flow(self, tenant, resource, authority_url):
# launch browser and get the code
results = _get_authorization_code(resource, authority_url)
if not results.get('code'):
raise CLIError('Login failed') # error detail is already displayed through previous steps
# exchange the code for the token
context = self._create_auth_context(tenant)
token_entry = context.acquire_token_with_authorization_code(results['code'], results['reply_url'],
resource, _CLIENT_ID, None)
self.user_id = token_entry[_TOKEN_ENTRY_USER_ID]
logger.warning("You have logged in. Now let us find all the subscriptions to which you have access...")
if tenant is None:
result = self._find_using_common_tenant(token_entry[_ACCESS_TOKEN], resource)
else:
result = self._find_using_specific_tenant(tenant, token_entry[_ACCESS_TOKEN])
return result
def find_through_interactive_flow(self, tenant, resource):
context = self._create_auth_context(tenant)
code = context.acquire_user_code(resource, _CLIENT_ID)
logger.warning(code['message'])
token_entry = context.acquire_token_with_device_code(resource, code, _CLIENT_ID)
self.user_id = token_entry[_TOKEN_ENTRY_USER_ID]
if tenant is None:
result = self._find_using_common_tenant(token_entry[_ACCESS_TOKEN], resource)
else:
result = self._find_using_specific_tenant(tenant, token_entry[_ACCESS_TOKEN])
return result
def find_from_service_principal_id(self, client_id, sp_auth, tenant, resource):
context = self._create_auth_context(tenant, False)
token_entry = sp_auth.acquire_token(context, resource, client_id)
self.user_id = client_id
result = self._find_using_specific_tenant(tenant, token_entry[_ACCESS_TOKEN])
self.tenants = [tenant]
return result
    # only occurs inside Cloud Shell or a VM with a managed identity
def find_from_raw_token(self, tenant, token):
# decode the token, so we know the tenant
result = self._find_using_specific_tenant(tenant, token)
self.tenants = [tenant]
return result
def _create_auth_context(self, tenant, use_token_cache=True):
token_cache = self._adal_token_cache if use_token_cache else None
return self._auth_context_factory(self.cli_ctx, tenant, token_cache)
def _find_using_common_tenant(self, access_token, resource):
import adal
from msrest.authentication import BasicTokenAuthentication
all_subscriptions = []
empty_tenants = []
mfa_tenants = []
token_credential = BasicTokenAuthentication({'access_token': access_token})
client = self._arm_client_factory(token_credential)
tenants = client.tenants.list()
for t in tenants:
tenant_id = t.tenant_id
# display_name is available since /tenants?api-version=2018-06-01,
# not available in /tenants?api-version=2016-06-01
if not hasattr(t, 'display_name'):
t.display_name = None
if hasattr(t, 'additional_properties'): # Remove this line once SDK is fixed
t.display_name = t.additional_properties.get('displayName')
temp_context = self._create_auth_context(tenant_id)
try:
temp_credentials = temp_context.acquire_token(resource, self.user_id, _CLIENT_ID)
except adal.AdalError as ex:
# because user creds went through the 'common' tenant, the error here must be
# tenant specific, like the account was disabled. For such errors, we will continue
# with other tenants.
msg = (getattr(ex, 'error_response', None) or {}).get('error_description') or ''
if 'AADSTS50076' in msg:
# The tenant requires MFA and can't be accessed with home tenant's refresh token
mfa_tenants.append(t)
else:
logger.warning("Failed to authenticate '%s' due to error '%s'", t, ex)
continue
subscriptions = self._find_using_specific_tenant(
tenant_id,
temp_credentials[_ACCESS_TOKEN])
if not subscriptions:
empty_tenants.append(t)
# When a subscription can be listed by multiple tenants, only the first appearance is retained
for sub_to_add in subscriptions:
add_sub = True
for sub_to_compare in all_subscriptions:
if sub_to_add.subscription_id == sub_to_compare.subscription_id:
logger.warning("Subscription %s '%s' can be accessed from tenants %s(default) and %s. "
"To select a specific tenant when accessing this subscription, "
"use 'az login --tenant TENANT_ID'.",
sub_to_add.subscription_id, sub_to_add.display_name,
sub_to_compare.tenant_id, sub_to_add.tenant_id)
add_sub = False
break
if add_sub:
all_subscriptions.append(sub_to_add)
# Show warning for empty tenants
if empty_tenants:
logger.warning("The following tenants don't contain accessible subscriptions. "
"Use 'az login --allow-no-subscriptions' to have tenant level access.")
for t in empty_tenants:
if t.display_name:
logger.warning("%s '%s'", t.tenant_id, t.display_name)
else:
logger.warning("%s", t.tenant_id)
# Show warning for MFA tenants
if mfa_tenants:
logger.warning("The following tenants require Multi-Factor Authentication (MFA). "
"Use 'az login --tenant TENANT_ID' to explicitly login to a tenant.")
for t in mfa_tenants:
if t.display_name:
logger.warning("%s '%s'", t.tenant_id, t.display_name)
else:
logger.warning("%s", t.tenant_id)
return all_subscriptions
def _find_using_specific_tenant(self, tenant, access_token):
from msrest.authentication import BasicTokenAuthentication
token_credential = BasicTokenAuthentication({'access_token': access_token})
client = self._arm_client_factory(token_credential)
subscriptions = client.subscriptions.list()
all_subscriptions = []
for s in subscriptions:
# map tenantId from REST API to homeTenantId
if hasattr(s, "tenant_id"):
setattr(s, 'home_tenant_id', s.tenant_id)
setattr(s, 'tenant_id', tenant)
all_subscriptions.append(s)
self.tenants.append(tenant)
return all_subscriptions
class CredsCache(object):
    '''Caches AAD tokens and service principal secrets; persistence to disk
    is handled here as well.
    '''
def __init__(self, cli_ctx, auth_ctx_factory=None, async_persist=True):
# AZURE_ACCESS_TOKEN_FILE is used by Cloud Console and not meant to be user configured
self._token_file = (os.environ.get('AZURE_ACCESS_TOKEN_FILE', None) or
os.path.join(get_config_dir(), 'accessTokens.json'))
self._service_principal_creds = []
self._auth_ctx_factory = auth_ctx_factory
self._adal_token_cache_attr = None
self._should_flush_to_disk = False
self._async_persist = async_persist
self._ctx = cli_ctx
if async_persist:
import atexit
atexit.register(self.flush_to_disk)
def persist_cached_creds(self):
self._should_flush_to_disk = True
if not self._async_persist:
self.flush_to_disk()
self.adal_token_cache.has_state_changed = False
def flush_to_disk(self):
if self._should_flush_to_disk:
with os.fdopen(os.open(self._token_file, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as cred_file:
items = self.adal_token_cache.read_items()
all_creds = [entry for _, entry in items]
# trim away useless fields (needed for cred sharing with xplat)
for i in all_creds:
for key in TOKEN_FIELDS_EXCLUDED_FROM_PERSISTENCE:
i.pop(key, None)
all_creds.extend(self._service_principal_creds)
cred_file.write(json.dumps(all_creds))
def retrieve_token_for_user(self, username, tenant, resource):
context = self._auth_ctx_factory(self._ctx, tenant, cache=self.adal_token_cache)
token_entry = context.acquire_token(resource, username, _CLIENT_ID)
if not token_entry:
raise CLIError("Could not retrieve token from local cache.{}".format(
" Please run 'az login'." if not in_cloud_console() else ''))
if self.adal_token_cache.has_state_changed:
self.persist_cached_creds()
return (token_entry[_TOKEN_ENTRY_TOKEN_TYPE], token_entry[_ACCESS_TOKEN], token_entry)
def retrieve_msal_token(self, tenant, scopes, data, refresh_token):
"""
        This is added only for the vmssh feature.
        It is a temporary solution and will be deprecated once MSAL is fully adopted.
"""
from azure.cli.core._msal import AdalRefreshTokenBasedClientApplication
tenant = tenant or 'organizations'
authority = self._ctx.cloud.endpoints.active_directory + '/' + tenant
app = AdalRefreshTokenBasedClientApplication(_CLIENT_ID, authority=authority)
result = app.acquire_token_silent(scopes, None, data=data, refresh_token=refresh_token)
return result["access_token"]
def retrieve_token_for_service_principal(self, sp_id, resource, tenant, use_cert_sn_issuer=False):
self.load_adal_token_cache()
matched = [x for x in self._service_principal_creds if sp_id == x[_SERVICE_PRINCIPAL_ID]]
if not matched:
raise CLIError("Could not retrieve credential from local cache for service principal {}. "
"Please run 'az login' for this service principal."
.format(sp_id))
matched_with_tenant = [x for x in matched if tenant == x[_SERVICE_PRINCIPAL_TENANT]]
if matched_with_tenant:
cred = matched_with_tenant[0]
else:
logger.warning("Could not retrieve credential from local cache for service principal %s under tenant %s. "
"Trying credential under tenant %s, assuming that is an app credential.",
sp_id, tenant, matched[0][_SERVICE_PRINCIPAL_TENANT])
cred = matched[0]
context = self._auth_ctx_factory(self._ctx, tenant, None)
sp_auth = ServicePrincipalAuth(cred.get(_ACCESS_TOKEN, None) or
cred.get(_SERVICE_PRINCIPAL_CERT_FILE, None),
use_cert_sn_issuer)
token_entry = sp_auth.acquire_token(context, resource, sp_id)
return (token_entry[_TOKEN_ENTRY_TOKEN_TYPE], token_entry[_ACCESS_TOKEN], token_entry)
def retrieve_secret_of_service_principal(self, sp_id):
self.load_adal_token_cache()
matched = [x for x in self._service_principal_creds if sp_id == x[_SERVICE_PRINCIPAL_ID]]
if not matched:
raise CLIError("No matched service principal found")
cred = matched[0]
return cred.get(_ACCESS_TOKEN, None)
@property
def adal_token_cache(self):
return self.load_adal_token_cache()
def load_adal_token_cache(self):
if self._adal_token_cache_attr is None:
import adal
all_entries = _load_tokens_from_file(self._token_file)
self._load_service_principal_creds(all_entries)
real_token = [x for x in all_entries if x not in self._service_principal_creds]
self._adal_token_cache_attr = adal.TokenCache(json.dumps(real_token))
return self._adal_token_cache_attr
def save_service_principal_cred(self, sp_entry):
self.load_adal_token_cache()
matched = [x for x in self._service_principal_creds
if sp_entry[_SERVICE_PRINCIPAL_ID] == x[_SERVICE_PRINCIPAL_ID] and
sp_entry[_SERVICE_PRINCIPAL_TENANT] == x[_SERVICE_PRINCIPAL_TENANT]]
state_changed = False
if matched:
# pylint: disable=line-too-long
if (sp_entry.get(_ACCESS_TOKEN, None) != matched[0].get(_ACCESS_TOKEN, None) or
sp_entry.get(_SERVICE_PRINCIPAL_CERT_FILE, None) != matched[0].get(_SERVICE_PRINCIPAL_CERT_FILE, None)):
self._service_principal_creds.remove(matched[0])
self._service_principal_creds.append(sp_entry)
state_changed = True
else:
self._service_principal_creds.append(sp_entry)
state_changed = True
if state_changed:
self.persist_cached_creds()
def _load_service_principal_creds(self, creds):
for c in creds:
if c.get(_SERVICE_PRINCIPAL_ID):
self._service_principal_creds.append(c)
return self._service_principal_creds
def remove_cached_creds(self, user_or_sp):
state_changed = False
# clear AAD tokens
tokens = self.adal_token_cache.find({_TOKEN_ENTRY_USER_ID: user_or_sp})
if tokens:
state_changed = True
self.adal_token_cache.remove(tokens)
# clear service principal creds
matched = [x for x in self._service_principal_creds
if x[_SERVICE_PRINCIPAL_ID] == user_or_sp]
if matched:
state_changed = True
self._service_principal_creds = [x for x in self._service_principal_creds
if x not in matched]
if state_changed:
self.persist_cached_creds()
def remove_all_cached_creds(self):
# we can clear file contents, but deleting it is simpler
_delete_file(self._token_file)
class ServicePrincipalAuth(object):
def __init__(self, password_arg_value, use_cert_sn_issuer=None):
if not password_arg_value:
raise CLIError('missing secret or certificate in order to '
'authenticate through a service principal')
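        # a value that resolves to an existing file is treated as a PEM
        # certificate; anything else is treated as a plain client secret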
if os.path.isfile(password_arg_value):
certificate_file = password_arg_value
from OpenSSL.crypto import load_certificate, FILETYPE_PEM
self.certificate_file = certificate_file
self.public_certificate = None
try:
with open(certificate_file, 'r') as file_reader:
self.cert_file_string = file_reader.read()
cert = load_certificate(FILETYPE_PEM, self.cert_file_string)
self.thumbprint = cert.digest("sha1").decode()
if use_cert_sn_issuer:
# low-tech but safe parsing based on
# https://github.com/libressl-portable/openbsd/blob/master/src/lib/libcrypto/pem/pem.h
match = re.search(r'\-+BEGIN CERTIFICATE.+\-+(?P<public>[^-]+)\-+END CERTIFICATE.+\-+',
self.cert_file_string, re.I)
self.public_certificate = match.group('public').strip()
except UnicodeDecodeError:
raise CLIError('Invalid certificate, please use a valid PEM file.')
else:
self.secret = password_arg_value
def acquire_token(self, authentication_context, resource, client_id):
if hasattr(self, 'secret'):
return authentication_context.acquire_token_with_client_credentials(resource, client_id, self.secret)
return authentication_context.acquire_token_with_client_certificate(resource, client_id, self.cert_file_string,
self.thumbprint, self.public_certificate)
def get_entry_to_persist(self, sp_id, tenant):
entry = {
_SERVICE_PRINCIPAL_ID: sp_id,
_SERVICE_PRINCIPAL_TENANT: tenant,
}
if hasattr(self, 'secret'):
entry[_ACCESS_TOKEN] = self.secret
else:
entry[_SERVICE_PRINCIPAL_CERT_FILE] = self.certificate_file
entry[_SERVICE_PRINCIPAL_CERT_THUMBPRINT] = self.thumbprint
return entry
def _get_authorization_code_worker(authority_url, resource, results):
# pylint: disable=too-many-statements
import socket
import random
import http.server
class ClientRedirectServer(http.server.HTTPServer): # pylint: disable=too-few-public-methods
query_params = {}
class ClientRedirectHandler(http.server.BaseHTTPRequestHandler):
# pylint: disable=line-too-long
def do_GET(self):
try:
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs # pylint: disable=import-error
if self.path.endswith('/favicon.ico'): # deal with legacy IE
self.send_response(204)
return
query = self.path.split('?', 1)[-1]
query = parse_qs(query, keep_blank_values=True)
self.server.query_params = query
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
landing_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'auth_landing_pages',
'ok.html' if 'code' in query else 'fail.html')
with open(landing_file, 'rb') as html_file:
self.wfile.write(html_file.read())
def log_message(self, format, *args): # pylint: disable=redefined-builtin,unused-argument,no-self-use
            pass # this prevents the http server from dumping messages to stdout
reply_url = None
# On Windows, HTTPServer by default doesn't throw error if the port is in-use
# https://github.com/Azure/azure-cli/issues/10578
if is_windows():
logger.debug('Windows is detected. Set HTTPServer.allow_reuse_address to False')
ClientRedirectServer.allow_reuse_address = False
elif is_wsl():
logger.debug('WSL is detected. Set HTTPServer.allow_reuse_address to False')
ClientRedirectServer.allow_reuse_address = False
for port in range(8400, 9000):
try:
web_server = ClientRedirectServer(('localhost', port), ClientRedirectHandler)
reply_url = "http://localhost:{}".format(port)
break
except socket.error as ex:
logger.warning("Port '%s' is taken with error '%s'. Trying with the next one", port, ex)
except UnicodeDecodeError:
logger.warning("Please make sure there is no international (Unicode) character in the computer name "
r"or C:\Windows\System32\drivers\etc\hosts file's 127.0.0.1 entries. "
"For more details, please see https://github.com/Azure/azure-cli/issues/12957")
break
if reply_url is None:
logger.warning("Error: can't reserve a port for authentication reply url")
return
try:
request_state = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(20))
except NotImplementedError:
request_state = 'code'
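    # the random `state` value is echoed back by AAD and verified after the
    # redirect, the standard OAuth2 guard against forged callbacks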
# launch browser:
url = ('{0}/oauth2/authorize?response_type=code&client_id={1}'
'&redirect_uri={2}&state={3}&resource={4}&prompt=select_account')
url = url.format(authority_url, _CLIENT_ID, reply_url, request_state, resource)
logger.info('Open browser with url: %s', url)
succ = open_page_in_browser(url)
if succ is False:
web_server.server_close()
results['no_browser'] = True
return
# wait for callback from browser.
while True:
web_server.handle_request()
if 'error' in web_server.query_params or 'code' in web_server.query_params:
break
if 'error' in web_server.query_params:
logger.warning('Authentication Error: "%s". Description: "%s" ', web_server.query_params['error'],
web_server.query_params.get('error_description'))
return
if 'code' in web_server.query_params:
code = web_server.query_params['code']
else:
logger.warning('Authentication Error: Authorization code was not captured in query strings "%s"',
web_server.query_params)
return
if 'state' in web_server.query_params:
response_state = web_server.query_params['state'][0]
if response_state != request_state:
raise RuntimeError("mismatched OAuth state")
else:
raise RuntimeError("missing OAuth state")
results['code'] = code[0]
results['reply_url'] = reply_url
def _get_authorization_code(resource, authority_url):
import threading
import time
results = {}
t = threading.Thread(target=_get_authorization_code_worker,
args=(authority_url, resource, results))
t.daemon = True
t.start()
while True:
time.sleep(2) # so that ctrl+c can stop the command
if not t.is_alive():
break # done
if results.get('no_browser'):
            raise RuntimeError('no browser available to complete the authentication flow')
return results
| 47.395706 | 124 | 0.638567 |
652b694c582acd1ebbc5f46d9464eec4ae7a9837 | 1,808 | py | Python | wads/__init__.py | i2mint/wads | fd46a4dcf456b714f8fbd34e8648042d84e9313d | ["Apache-2.0"] | null | null | null | wads/__init__.py | i2mint/wads | fd46a4dcf456b714f8fbd34e8648042d84e9313d | ["Apache-2.0"] | 4 | 2020-11-10T19:26:25.000Z | 2021-04-05T21:56:41.000Z | wads/__init__.py | i2mint/wads | fd46a4dcf456b714f8fbd34e8648042d84e9313d | ["Apache-2.0"] | null | null | null | """
Console Scripts
---------------
To see available commands
::
wads --help
"""
import setuptools # to avoid a warning due to distutils_patch.py
import os
import json
root_dir = os.path.dirname(__file__)
root_dir_name = os.path.basename(root_dir)
rjoin = lambda *paths: os.path.join(root_dir, *paths)
data_dir = rjoin('data')
licenses_json_path = rjoin(data_dir, 'github_licenses.json')
github_ci_tpl_path = rjoin(data_dir, 'github_ci_tpl.yml')
gitlab_ci_tpl_path = rjoin(data_dir, 'gitlab_ci_tpl.yml')
pkg_dir = os.path.dirname(root_dir)
pkg_join = lambda *paths: os.path.join(pkg_dir, *paths)
# TODO: Change to use ini format? (Or yaml or toml?)
wads_configs_file = rjoin(data_dir, 'wads_configs.json')
try:
wads_configs = json.load(open(wads_configs_file))
except FileNotFoundError:
wads_configs = {
'populate_dflts': {
'description': 'There is a bit of an air of mystery around this project...',
'root_url': None,
'author': None,
'license': 'mit',
'description_file': 'README.md',
'long_description': 'file:README.md',
'long_description_content_type': 'text/markdown',
'keywords': None,
'install_requires': None,
'verbose': None,
'version': '0.0.1',
}
}
pkg_path_names = ('.gitignore', 'setup.py')
pkg_paths = {pkg_join(name) for name in pkg_path_names}
from wads.populate import populate_pkg_dir
def main():
import argh # pip install argh
from wads.pack import argh_kwargs as pack_kw
# from wads.docs_gen import argh_kwargs as docs_gen_kw
parser = argh.ArghParser()
parser.add_commands(**pack_kw)
# parser.add_commands(**docs_gen_kw)
parser.dispatch()
if __name__ == '__main__':
main()
| 25.464789 | 88 | 0.659845 |
81b1605e43786d608edf6185371cf91018e35cb3 | 881 | py | Python | tests/test_primes.py | jonspock/vdf-competition | e77dc86f438d4e734c68f6b6aa12b30a3dc65475 | ["Apache-2.0"] | 97 | 2018-10-04T18:10:42.000Z | 2021-08-23T10:37:06.000Z | tests/test_primes.py | jonspock/vdf-competition | e77dc86f438d4e734c68f6b6aa12b30a3dc65475 | ["Apache-2.0"] | 4 | 2018-10-04T18:20:49.000Z | 2021-05-03T07:13:14.000Z | tests/test_primes.py | jonspock/vdf-competition | e77dc86f438d4e734c68f6b6aa12b30a3dc65475 | ["Apache-2.0"] | 17 | 2018-10-08T18:08:21.000Z | 2022-01-12T00:54:32.000Z | import heapq
import unittest
from inkfish import primes
def prime_iter(max_value=None):
yield 2
heap = [(4, 2)]
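    # incremental sieve: each heap entry is (next_composite, prime); a candidate
    # equal to the smallest next_composite is composite, otherwise it is prime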
c = 2
while max_value is None or c < max_value:
c += 1
n, p = heap[0]
if n > c:
yield c
heapq.heappush(heap, (c+c, c))
while n <= c:
heapq.heapreplace(heap, (n+p, p))
n, p = heap[0]
class test_Primes(unittest.TestCase):
def test_odd_primes_below_n(self):
p1 = primes.odd_primes_below_n(15000)
self.assertEqual(p1, list(prime_iter(15000))[1:])
def test_miller_rabin_test(self):
p1 = primes.odd_primes_below_n(25000)
for p in p1:
mr = primes.miller_rabin_test(p)
self.assertTrue(mr)
for p in range(3, 25000):
mr = primes.miller_rabin_test(p)
self.assertEqual(mr, p in p1)
| 24.472222 | 57 | 0.568672 |
a9781d5c830416b5dab34277ca54d5725a19ca6a | 60,932 | py | Python | userbot/utils/google_images_download.py | Ncode2014/RBot | 9b7592bbb680f35a4d80111715bf3ac1be5869f8 | ["Naumen", "Condor-1.1", "MS-PL"] | null | null | null | userbot/utils/google_images_download.py | Ncode2014/RBot | 9b7592bbb680f35a4d80111715bf3ac1be5869f8 | ["Naumen", "Condor-1.1", "MS-PL"] | null | null | null | userbot/utils/google_images_download.py | Ncode2014/RBot | 9b7592bbb680f35a4d80111715bf3ac1be5869f8 | ["Naumen", "Condor-1.1", "MS-PL"] | null | null | null | #!/usr/bin/env python
# In[ ]:
# coding: utf-8
###### Searching and Downloading Google Images to the local disk ######
import argparse
# Import Libraries
import codecs
import datetime
import http.client
import json
import os
import re
import ssl
import sys
import time # Importing the time library to check the time of code execution
import urllib.request
from http.client import BadStatusLine
from urllib.parse import quote
from urllib.request import HTTPError, Request, URLError, urlopen
http.client._MAXHEADERS = 1000
args_list = [
"keywords",
"keywords_from_file",
"prefix_keywords",
"suffix_keywords",
"limit",
"format",
"color",
"color_type",
"usage_rights",
"size",
"exact_size",
"aspect_ratio",
"type",
"time",
"time_range",
"delay",
"url",
"single_image",
"output_directory",
"image_directory",
"no_directory",
"proxy",
"similar_images",
"specific_site",
"print_urls",
"print_size",
"print_paths",
"metadata",
"extract_metadata",
"socket_timeout",
"thumbnail",
"thumbnail_only",
"language",
"prefix",
"chromedriver",
"related_images",
"safe_search",
"no_numbering",
"offset",
"no_download",
"save_source",
"silent_mode",
"ignore_urls",
]
def user_input():
config = argparse.ArgumentParser()
config.add_argument(
"-cf",
"--config_file",
help="config file name",
default="",
type=str,
required=False,
)
config_file_check = config.parse_known_args()
object_check = vars(config_file_check[0])
records = []
if object_check["config_file"] != "":
json_file = json.load(open(config_file_check[0].config_file))
for item in json_file["Records"]:
arguments = {i: None for i in args_list}
for key, value in item.items():
arguments[key] = value
records.append(arguments)
len(records)
else:
# Taking command line arguments from users
parser = argparse.ArgumentParser()
parser.add_argument(
"-k",
"--keywords",
help="delimited list input",
type=str,
required=False)
parser.add_argument(
"-kf",
"--keywords_from_file",
help="extract list of keywords from a text file",
type=str,
required=False,
)
parser.add_argument(
"-sk",
"--suffix_keywords",
help="comma separated additional words added after to main keyword",
type=str,
required=False,
)
parser.add_argument(
"-pk",
"--prefix_keywords",
help="comma separated additional words added before main keyword",
type=str,
required=False,
)
parser.add_argument(
"-l",
"--limit",
help="delimited list input",
type=str,
required=False)
parser.add_argument(
"-f",
"--format",
help="download images with specific format",
type=str,
required=False,
choices=["jpg", "gif", "png", "bmp", "svg", "webp", "ico"],
)
parser.add_argument(
"-u",
"--url",
help="search with google image URL",
type=str,
required=False)
parser.add_argument(
"-x",
"--single_image",
help="downloading a single image from URL",
type=str,
required=False,
)
parser.add_argument(
"-o",
"--output_directory",
help="download images in a specific main directory",
type=str,
required=False,
)
parser.add_argument(
"-i",
"--image_directory",
help="download images in a specific sub-directory",
type=str,
required=False,
)
parser.add_argument(
"-n",
"--no_directory",
default=False,
help="download images in the main directory but no sub-directory",
action="store_true",
)
parser.add_argument(
"-d",
"--delay",
help="delay in seconds to wait between downloading two images",
type=int,
required=False,
)
parser.add_argument(
"-co",
"--color",
help="filter on color",
type=str,
required=False,
choices=[
"red",
"orange",
"yellow",
"green",
"teal",
"blue",
"purple",
"pink",
"white",
"gray",
"black",
"brown",
],
)
parser.add_argument(
"-ct",
"--color_type",
help="filter on color",
type=str,
required=False,
choices=["full-color", "black-and-white", "transparent"],
)
parser.add_argument(
"-r",
"--usage_rights",
help="usage rights",
type=str,
required=False,
choices=[
"labeled-for-reuse-with-modifications",
"labeled-for-reuse",
"labeled-for-noncommercial-reuse-with-modification",
"labeled-for-nocommercial-reuse",
],
)
parser.add_argument(
"-s",
"--size",
help="image size",
type=str,
required=False,
choices=[
"large",
"medium",
"icon",
">400*300",
">640*480",
">800*600",
">1024*768",
">2MP",
">4MP",
">6MP",
">8MP",
">10MP",
">12MP",
">15MP",
">20MP",
">40MP",
">70MP",
],
)
parser.add_argument(
"-es",
"--exact_size",
help='exact image resolution "WIDTH,HEIGHT"',
type=str,
required=False,
)
parser.add_argument(
"-t",
"--type",
help="image type",
type=str,
required=False,
choices=["face", "photo", "clipart", "line-drawing", "animated"],
)
parser.add_argument(
"-w",
"--time",
help="image age",
type=str,
required=False,
choices=[
"past-24-hours",
"past-7-days",
"past-month",
"past-year"],
)
parser.add_argument(
"-wr",
"--time_range",
help='time range for the age of the image. should be in the format {"time_min":"MM/DD/YYYY","time_max":"MM/DD/YYYY"}',
type=str,
required=False,
)
parser.add_argument(
"-a",
"--aspect_ratio",
help="comma separated additional words added to keywords",
type=str,
required=False,
choices=["tall", "square", "wide", "panoramic"],
)
parser.add_argument(
"-si",
"--similar_images",
help="downloads images very similar to the image URL you provide",
type=str,
required=False,
)
parser.add_argument(
"-ss",
"--specific_site",
help="downloads images that are indexed from a specific website",
type=str,
required=False,
)
parser.add_argument(
"-p",
"--print_urls",
default=False,
help="Print the URLs of the images",
action="store_true",
)
parser.add_argument(
"-ps",
"--print_size",
default=False,
help="Print the size of the images on disk",
action="store_true",
)
parser.add_argument(
"-pp",
"--print_paths",
default=False,
help="Prints the list of absolute paths of the images",
action="store_true",
)
parser.add_argument(
"-m",
"--metadata",
default=False,
help="Print the metadata of the image",
action="store_true",
)
parser.add_argument(
"-e",
"--extract_metadata",
default=False,
help="Dumps all the logs into a text file",
action="store_true",
)
parser.add_argument(
"-st",
"--socket_timeout",
default=False,
help="Connection timeout waiting for the image to download",
type=float,
)
parser.add_argument(
"-th",
"--thumbnail",
default=False,
help="Downloads image thumbnail along with the actual image",
action="store_true",
)
parser.add_argument(
"-tho",
"--thumbnail_only",
default=False,
help="Downloads only thumbnail without downloading actual images",
action="store_true",
)
parser.add_argument(
"-la",
"--language",
default=False,
help="Defines the language filter. The search results are authomatically returned in that language",
type=str,
required=False,
choices=[
"Arabic",
"Chinese (Simplified)",
"Chinese (Traditional)",
"Czech",
"Danish",
"Dutch",
"English",
"Estonian",
"Finnish",
"French",
"German",
"Greek",
"Hebrew",
"Hungarian",
"Icelandic",
"Italian",
"Japanese",
"Korean",
"Latvian",
"Lithuanian",
"Norwegian",
"Portuguese",
"Polish",
"Romanian",
"Russian",
"Spanish",
"Swedish",
"Turkish",
],
)
parser.add_argument(
"-pr",
"--prefix",
default=False,
help="A word that you would want to prefix in front of each image name",
type=str,
required=False,
)
parser.add_argument(
"-px",
"--proxy",
help="specify a proxy address and port",
type=str,
required=False,
)
parser.add_argument(
"-cd",
"--chromedriver",
help="specify the path to chromedriver executable in your local machine",
type=str,
required=False,
)
parser.add_argument(
"-ri",
"--related_images",
default=False,
help="Downloads images that are similar to the keyword provided",
action="store_true",
)
parser.add_argument(
"-sa",
"--safe_search",
default=False,
help="Turns on the safe search filter while searching for images",
action="store_true",
)
parser.add_argument(
"-nn",
"--no_numbering",
default=False,
help="Allows you to exclude the default numbering of images",
action="store_true",
)
parser.add_argument(
"-of",
"--offset",
help="Where to start in the fetched links",
type=str,
required=False,
)
parser.add_argument(
"-nd",
"--no_download",
default=False,
help="Prints the URLs of the images and/or thumbnails without downloading them",
action="store_true",
)
parser.add_argument(
"-iu",
"--ignore_urls",
default=False,
help="delimited list input of image urls/keywords to ignore",
type=str,
)
parser.add_argument(
"-sil",
"--silent_mode",
default=False,
help="Remains silent. Does not print notification messages on the terminal",
action="store_true",
)
parser.add_argument(
"-is",
"--save_source",
help="creates a text file containing a list of downloaded images along with source page url",
type=str,
required=False,
)
args = parser.parse_args()
arguments = vars(args)
records.append(arguments)
return records
class googleimagesdownload:
def __init__(self):
pass
# Downloading entire Web Document (Raw Page Content)
def download_page(self, url):
try:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"
}
req = urllib.request.Request(url, headers=headers)
resp = urllib.request.urlopen(req)
return str(resp.read())
except Exception:
print(
"Could not open URL. Please check your internet connection and/or ssl settings \n"
"If you are using proxy, make sure your proxy settings is configured correctly")
sys.exit()
# Download Page for more than 100 images
def download_extended_page(self, url, chromedriver):
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--headless")
try:
browser = webdriver.Chrome(chromedriver, chrome_options=options)
except Exception as e:
print(
"Looks like we cannot locate the path the 'chromedriver' (use the '--chromedriver' "
"argument to specify the path to the executable.) or google chrome browser is not "
"installed on your machine (exception: %s)" %
e)
sys.exit()
browser.set_window_size(1024, 768)
# Open the link
browser.get(url)
time.sleep(1)
print("Getting you a lot of images. This may take a few moments...")
element = browser.find_element_by_tag_name("body")
# Scroll down
for i in range(30):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3)
try:
browser.find_element_by_id("smb").click()
for _ in range(50):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3) # bot id protection
except BaseException:
for _ in range(10):
element.send_keys(Keys.PAGE_DOWN)
time.sleep(0.3) # bot id protection
print("Reached end of Page.")
time.sleep(0.5)
source = browser.page_source # page source
# close the browser
browser.close()
return source
# Correcting the escape characters for python2
def replace_with_byte(self, match):
return chr(int(match.group(0)[1:], 8))
def repair(self, brokenjson):
# up to 3 digits for byte values up to FF
invalid_escape = re.compile(r"\\[0-7]{1,3}")
return invalid_escape.sub(self.replace_with_byte, brokenjson)
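    # e.g. repair(r'a\075b') yields 'a=b' (octal 075 is '='), which turns
    # Google's escaped blob back into parseable JSON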
    # Finding the next related-images tab in the given raw page
def get_next_tab(self, s):
start_line = s.find('class="dtviD"')
if start_line == -1: # If no links are found then give an error!
end_quote = 0
link = "no_tabs"
return link, "", end_quote
start_line = s.find('class="dtviD"')
start_content = s.find('href="', start_line + 1)
end_content = s.find('">', start_content + 1)
url_item = "https://www.google.com" + \
str(s[start_content + 6: end_content])
url_item = url_item.replace("&", "&")
start_line_2 = s.find('class="dtviD"')
s = s.replace("&", "&")
start_content_2 = s.find(":", start_line_2 + 1)
end_content_2 = s.find("&usg=", start_content_2 + 1)
url_item_name = str(s[start_content_2 + 1: end_content_2])
chars = url_item_name.find(",g_1:")
chars_end = url_item_name.find(":", chars + 6)
if chars_end == -1:
updated_item_name = (url_item_name[chars + 5:]).replace("+", " ")
else:
updated_item_name = (
url_item_name[chars + 5: chars_end]).replace("+", " ")
return url_item, updated_item_name, end_content
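    # note: this walks Google's raw HTML by string position to extract the
    # 'related searches' chips, so it is tightly coupled to the current markup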
    # Getting all links with the help of 'get_next_tab'
def get_all_tabs(self, page):
tabs = {}
while True:
item, item_name, end_content = self.get_next_tab(page)
if item == "no_tabs":
break
if len(item_name) > 100 or item_name == "background-color":
break
# Append all the links in the list named 'Links'
tabs[item_name] = item
# Timer could be used to slow down the request for image
# downloads
time.sleep(0.1)
page = page[end_content:]
return tabs
# Format the object in readable format
def format_object(self, object):
data = object[1]
main = data[3]
info = data[9]
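        # offsets into Google's packed result array (reverse-engineered and
        # liable to change): data[3] is [url, width, height]; data[9] maps
        # opaque numeric keys to the description/host/source fields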
return {
"image_height": main[2],
"image_width": main[1],
"image_link": main[0],
"image_format": main[0][-1 * (len(main[0]) - main[0].rfind(".") - 1):],
"image_description": info["2003"][3],
"image_host": info["183836587"][0],
"image_source": info["2003"][2],
"image_thumbnail_url": data[2][0],
}
# function to download single image
def single_image(self, image_url):
main_directory = "downloads"
extensions = (".jpg", ".gif", ".png", ".bmp", ".svg", ".webp", ".ico")
url = image_url
try:
os.makedirs(main_directory)
except OSError as e:
if e.errno != 17:
raise
req = Request(
url,
headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
},
)
response = urlopen(req, None, 10)
data = response.read()
response.close()
image_name = str(url[(url.rfind("/")) + 1:])
if "?" in image_name:
image_name = image_name[: image_name.find("?")]
# if ".jpg" in image_name or ".gif" in image_name or ".png" in
# image_name or ".bmp" in image_name or ".svg" in image_name or ".webp"
# in image_name or ".ico" in image_name:
if any(map(lambda extension: extension in image_name, extensions)):
file_name = main_directory + "/" + image_name
else:
file_name = main_directory + "/" + image_name + ".jpg"
image_name = image_name + ".jpg"
try:
with open(file_name, "wb") as output_file:
output_file.write(data)
except OSError as e:
raise e
except OSError as e:
raise e
print(
"completed ====> " +
image_name.encode("raw_unicode_escape").decode("utf-8"))
def similar_images(self, similar_images):
try:
searchUrl = (
"https://www.google.com/searchbyimage?site=search&sa=X&image_url=" +
similar_images)
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"}
req1 = urllib.request.Request(searchUrl, headers=headers)
resp1 = urllib.request.urlopen(req1)
content = str(resp1.read())
l1 = content.find("AMhZZ")
l2 = content.find("&", l1)
urll = content[l1:l2]
newurl = (
"https://www.google.com/search?tbs=sbi:" +
urll +
"&site=search&sa=X")
req2 = urllib.request.Request(newurl, headers=headers)
urllib.request.urlopen(req2)
l3 = content.find("/search?sa=X&q=")
l4 = content.find(";", l3 + 19)
return content[l3 + 19: l4]
except BaseException:
return "Cloud not connect to Google Images endpoint"
# Building URL parameters
def build_url_parameters(self, arguments):
if arguments["language"]:
lang = "&lr="
lang_param = {
"Arabic": "lang_ar",
"Chinese (Simplified)": "lang_zh-CN",
"Chinese (Traditional)": "lang_zh-TW",
"Czech": "lang_cs",
"Danish": "lang_da",
"Dutch": "lang_nl",
"English": "lang_en",
"Estonian": "lang_et",
"Finnish": "lang_fi",
"French": "lang_fr",
"German": "lang_de",
"Greek": "lang_el",
"Hebrew": "lang_iw ",
"Hungarian": "lang_hu",
"Icelandic": "lang_is",
"Italian": "lang_it",
"Japanese": "lang_ja",
"Korean": "lang_ko",
"Latvian": "lang_lv",
"Lithuanian": "lang_lt",
"Norwegian": "lang_no",
"Portuguese": "lang_pt",
"Polish": "lang_pl",
"Romanian": "lang_ro",
"Russian": "lang_ru",
"Spanish": "lang_es",
"Swedish": "lang_sv",
"Turkish": "lang_tr",
}
lang_url = lang + lang_param[arguments["language"]]
else:
lang_url = ""
if arguments["time_range"]:
json_acceptable_string = arguments["time_range"].replace("'", '"')
d = json.loads(json_acceptable_string)
time_range = ",cdr:1,cd_min:" + \
d["time_min"] + ",cd_max:" + d["time_max"]
else:
time_range = ""
if arguments["exact_size"]:
size_array = [x.strip()
for x in arguments["exact_size"].split(",")]
exact_size = (",isz:ex,iszw:" +
str(size_array[0]) +
",iszh:" +
str(size_array[1]))
else:
exact_size = ""
built_url = "&tbs="
counter = 0
params = {"color": [arguments["color"],
{"red": "ic:specific,isc:red",
"orange": "ic:specific,isc:orange",
"yellow": "ic:specific,isc:yellow",
"green": "ic:specific,isc:green",
"teal": "ic:specific,isc:teel",
"blue": "ic:specific,isc:blue",
"purple": "ic:specific,isc:purple",
"pink": "ic:specific,isc:pink",
"white": "ic:specific,isc:white",
"gray": "ic:specific,isc:gray",
"black": "ic:specific,isc:black",
"brown": "ic:specific,isc:brown",
},
],
"color_type": [arguments["color_type"],
{"full-color": "ic:color",
"black-and-white": "ic:gray",
"transparent": "ic:trans",
},
],
"usage_rights": [arguments["usage_rights"],
{"labeled-for-reuse-with-modifications": "sur:fmc",
"labeled-for-reuse": "sur:fc",
"labeled-for-noncommercial-reuse-with-modification": "sur:fm",
"labeled-for-nocommercial-reuse": "sur:f",
},
],
"size": [arguments["size"],
{"large": "isz:l",
"medium": "isz:m",
"icon": "isz:i",
">400*300": "isz:lt,islt:qsvga",
">640*480": "isz:lt,islt:vga",
">800*600": "isz:lt,islt:svga",
1024*768": "visz:lt,islt:xga",">
                  ">1024*768": "isz:lt,islt:xga",
">2MP": "isz:lt,islt:2mp",
">4MP": "isz:lt,islt:4mp",
">6MP": "isz:lt,islt:6mp",
">8MP": "isz:lt,islt:8mp",
">10MP": "isz:lt,islt:10mp",
">12MP": "isz:lt,islt:12mp",
">15MP": "isz:lt,islt:15mp",
">20MP": "isz:lt,islt:20mp",
">40MP": "isz:lt,islt:40mp",
">70MP": "isz:lt,islt:70mp",
},
],
"type": [arguments["type"],
{"face": "itp:face",
"photo": "itp:photo",
"clipart": "itp:clipart",
"line-drawing": "itp:lineart",
"animated": "itp:animated",
},
],
"time": [arguments["time"],
{"past-24-hours": "qdr:d",
"past-7-days": "qdr:w",
"past-month": "qdr:m",
"past-year": "qdr:y",
},
],
"aspect_ratio": [arguments["aspect_ratio"],
{"tall": "iar:t",
"square": "iar:s",
"wide": "iar:w",
"panoramic": "iar:xw",
},
],
"format": [arguments["format"],
{"jpg": "ift:jpg",
"gif": "ift:gif",
"png": "ift:png",
"bmp": "ift:bmp",
"svg": "ift:svg",
"webp": "webp",
"ico": "ift:ico",
"raw": "ift:craw",
},
],
}
for key, value in params.items():
if value[0] is not None:
ext_param = value[1][value[0]]
# counter will tell if it is first param added or not
if counter == 0:
# add it to the built url
built_url += ext_param
else:
built_url = built_url + "," + ext_param
counter += 1
built_url = lang_url + built_url + exact_size + time_range
return built_url
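    # Worked sketch of the string assembled above, assuming these argument
    # values and the insertion-ordered dicts of Python 3.7+ (the "tbs" grammar
    # itself is Google's and may change):
    #
    #   arguments = {"language": "English", "color": "red", "size": "large",
    #                "time_range": None, "exact_size": None, ...all others None}
    #   build_url_parameters(arguments)
    #   # -> "&lr=lang_en&tbs=ic:specific,isc:red,isz:l"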
# building main search URL
def build_search_url(
self,
search_term,
params,
url,
similar_images,
specific_site,
safe_search):
# check the args and choose the URL
if url:
            pass  # keep the caller-supplied URL as-is
elif similar_images:
print(similar_images)
keywordem = self.similar_images(similar_images)
url = (
"https://www.google.com/search?q=" +
keywordem +
"&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg")
elif specific_site:
url = (
"https://www.google.com/search?q="
+ quote(search_term.encode("utf-8"))
+ "&as_sitesearch="
+ specific_site
+ "&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch"
+ params
+ "&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg"
)
else:
url = (
"https://www.google.com/search?q="
+ quote(search_term.encode("utf-8"))
+ "&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch"
+ params
+ "&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg"
)
# safe search check
if safe_search:
# check safe_search
safe_search_string = "&safe=active"
url = url + safe_search_string
return url
# measures the file size
def file_size(self, file_path):
if os.path.isfile(file_path):
file_info = os.stat(file_path)
size = file_info.st_size
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if size < 1024.0:
return f"{size:3.1f} {x}"
size /= 1024.0
return size
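    # Worked example of the loop above: 5_242_880 bytes -> 5120.0 on the first
    # pass (KB) -> 5.0 on the second, so the method returns "5.0 MB".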
# keywords from file
def keywords_from_file(self, file_name):
search_keyword = []
with codecs.open(file_name, "r", encoding="utf-8-sig") as f:
if ".csv" in file_name or ".txt" in file_name:
for line in f:
if line not in ["\n", "\r\n"]:
search_keyword.append(
line.replace(
"\n", "").replace(
"\r", ""))
else:
print(
"Invalid file type: Valid file types are either .txt or .csv \n"
"exiting...")
sys.exit()
return search_keyword
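    # Hedged example input (hypothetical keywords.txt, one keyword per line):
    #
    #   cats
    #   dogs
    #   northern lights
    #
    #   keywords_from_file("keywords.txt")
    #   # -> ["cats", "dogs", "northern lights"]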
# make directories
def create_directories(
self,
main_directory,
dir_name,
thumbnail,
thumbnail_only):
dir_name_thumbnail = dir_name + " - thumbnail"
# make a search keyword directory
try:
if not os.path.exists(main_directory):
os.makedirs(main_directory)
time.sleep(0.15)
path = dir_name
sub_directory = os.path.join(main_directory, path)
if not os.path.exists(sub_directory):
os.makedirs(sub_directory)
if thumbnail or thumbnail_only:
sub_directory_thumbnail = os.path.join(
main_directory, dir_name_thumbnail
)
if not os.path.exists(sub_directory_thumbnail):
os.makedirs(sub_directory_thumbnail)
except OSError as e:
if e.errno != 17:
raise
# Download Image thumbnails
def download_image_thumbnail(
self,
image_url,
main_directory,
dir_name,
return_image_name,
print_urls,
socket_timeout,
print_size,
no_download,
save_source,
img_src,
ignore_urls,
):
if print_urls or no_download:
print("Image URL: " + image_url)
if no_download:
return "success", "Printed url without downloading"
try:
req = Request(
image_url,
headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
},
)
try:
# timeout time to download an image
timeout = float(socket_timeout) if socket_timeout else 10
response = urlopen(req, None, timeout)
data = response.read()
response.close()
path = (
main_directory
+ "/"
+ dir_name
+ " - thumbnail"
+ "/"
+ return_image_name
)
try:
with open(path, "wb") as output_file:
output_file.write(data)
if save_source:
list_path = main_directory + "/" + save_source + ".txt"
with open(list_path, "a") as list_file:
list_file.write(path + "\t" + img_src + "\n")
                except OSError as e:
                    download_status = "fail"
                    download_message = (
                        "OSError on an image...trying next one..." + " Error: " + str(e))
                else:
                    download_status = "success"
                    download_message = (
                        "Completed Image Thumbnail ====> " + return_image_name
                    )
# image size parameter
if print_size:
print("Image Size: " + str(self.file_size(path)))
except UnicodeEncodeError as e:
download_status = "fail"
download_message = (
"UnicodeEncodeError on an image...trying next one..."
+ " Error: "
+ str(e)
)
except HTTPError as e: # If there is any HTTPError
download_status = "fail"
download_message = (
"HTTPError on an image...trying next one..." +
" Error: " +
str(e))
except URLError as e:
download_status = "fail"
download_message = (
"URLError on an image...trying next one..." +
" Error: " +
str(e))
except ssl.CertificateError as e:
download_status = "fail"
download_message = (
"CertificateError on an image...trying next one..."
+ " Error: "
+ str(e)
)
except OSError as e: # If there is any IOError
download_status = "fail"
download_message = (
"IOError on an image...trying next one..." +
" Error: " +
str(e))
return download_status, download_message
# Download Images
def download_image(
self,
image_url,
image_format,
main_directory,
dir_name,
count,
print_urls,
socket_timeout,
prefix,
print_size,
no_numbering,
no_download,
save_source,
img_src,
silent_mode,
thumbnail_only,
format,
ignore_urls,
):
if not silent_mode and (print_urls or no_download):
print("Image URL: " + image_url)
if ignore_urls and any(
url in image_url for url in ignore_urls.split(",")):
return (
"fail",
"Image ignored due to 'ignore url' parameter",
None,
image_url,
)
if thumbnail_only:
return (
"success",
"Skipping image download...",
str(image_url[(image_url.rfind("/")) + 1:]),
image_url,
)
if no_download:
return "success", "Printed url without downloading", None, image_url
try:
req = Request(
image_url,
headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
},
)
try:
# timeout time to download an image
timeout = float(socket_timeout) if socket_timeout else 10
response = urlopen(req, None, timeout)
data = response.read()
response.close()
extensions = [
".jpg",
".jpeg",
".gif",
".png",
".bmp",
".svg",
".webp",
".ico",
]
# keep everything after the last '/'
image_name = str(image_url[(image_url.rfind("/")) + 1:])
if format:
if not image_format or image_format != format:
download_status = "fail"
download_message = "Wrong image format returned. Skipping..."
return_image_name = ""
absolute_path = ""
return (
download_status,
download_message,
return_image_name,
absolute_path,
)
if (
image_format == ""
or not image_format
or "." + image_format not in extensions
):
download_status = "fail"
download_message = "Invalid or missing image format. Skipping..."
return_image_name = ""
absolute_path = ""
return (
download_status,
download_message,
return_image_name,
absolute_path,
)
if image_name.lower().find("." + image_format) < 0:
image_name = image_name + "." + image_format
else:
image_name = image_name[
: image_name.lower().find("." + image_format)
+ (len(image_format) + 1)
]
# prefix name in image
prefix = prefix + " " if prefix else ""
if no_numbering:
path = main_directory + "/" + dir_name + "/" + prefix + image_name
else:
path = (
main_directory
+ "/"
+ dir_name
+ "/"
+ prefix
+ str(count)
+ "."
+ image_name
)
try:
with open(path, "wb") as output_file:
output_file.write(data)
if save_source:
list_path = main_directory + "/" + save_source + ".txt"
with open(list_path, "a") as list_file:
list_file.write(path + "\t" + img_src + "\n")
absolute_path = os.path.abspath(path)
                except OSError as e:
                    download_status = "fail"
                    download_message = (
                        "OSError on an image...trying next one..." + " Error: " + str(e))
                    return_image_name = ""
                    absolute_path = ""
                else:
                    # return image name back to the calling method so it can be
                    # reused for thumbnail downloads
                    download_status = "success"
                    download_message = (
                        "Completed Image ====> " +
                        prefix +
                        str(count) +
                        "." +
                        image_name)
                    return_image_name = prefix + str(count) + "." + image_name
# image size parameter
if not silent_mode and print_size:
print("Image Size: " + str(self.file_size(path)))
except UnicodeEncodeError as e:
download_status = "fail"
download_message = (
"UnicodeEncodeError on an image...trying next one..."
+ " Error: "
+ str(e)
)
return_image_name = ""
absolute_path = ""
        except HTTPError as e:  # catch HTTPError before its URLError parent
            download_status = "fail"
            download_message = (
                "HTTPError on an image...trying next one..." +
                " Error: " +
                str(e))
            return_image_name = ""
            absolute_path = ""
        except URLError as e:
            download_status = "fail"
            download_message = (
                "URLError on an image...trying next one..." +
                " Error: " +
                str(e))
            return_image_name = ""
            absolute_path = ""
        except BadStatusLine as e:
            download_status = "fail"
            download_message = (
                "BadStatusLine on an image...trying next one..."
                + " Error: "
                + str(e)
            )
            return_image_name = ""
            absolute_path = ""
except ssl.CertificateError as e:
download_status = "fail"
download_message = (
"CertificateError on an image...trying next one..."
+ " Error: "
+ str(e)
)
return_image_name = ""
absolute_path = ""
except OSError as e: # If there is any IOError
download_status = "fail"
download_message = (
"IOError on an image...trying next one..." +
" Error: " +
str(e))
return_image_name = ""
absolute_path = ""
except IncompleteRead as e:
download_status = "fail"
download_message = (
"IncompleteReadError on an image...trying next one..."
+ " Error: "
+ str(e)
)
return_image_name = ""
absolute_path = ""
return download_status, download_message, return_image_name, absolute_path
# Finding 'Next Image' from the given raw page
def _get_next_item(self, s):
start_line = s.find("rg_meta notranslate")
if start_line == -1: # If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
start_line = s.find('class="rg_meta notranslate">')
start_object = s.find("{", start_line + 1)
end_object = s.find("</div>", start_object + 1)
object_raw = str(s[start_object:end_object])
# remove escape characters based on python version
try:
object_decode = bytes(object_raw, "utf-8").decode("unicode_escape")
final_object = json.loads(object_decode)
except BaseException:
final_object = ""
return final_object, end_object
# Getting all links with the help of '_images_get_next_image'
def _get_image_objects(self, s):
start_line = s.find("AF_initDataCallback({key: \\'ds:1\\'") - 10
start_object = s.find("[", start_line + 1)
end_object = s.find("</script>", start_object + 1) - 4
object_raw = str(s[start_object:end_object])
object_decode = bytes(object_raw[:-1],
"utf-8").decode("unicode_escape")
# LOGS.info(_format.paste_text(object_decode[:-15]))
return json.loads(object_decode[:-15])[31][0][12][2]
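    # Note (added): the [31][0][12][2] path above indexes straight into the
    # AF_initDataCallback payload; it is reverse-engineered rather than a
    # documented API, so any results-page redesign will break it.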
def _get_all_items(self, page, main_directory, dir_name, limit, arguments):
items = []
abs_path = []
errorCount = 0
i = 0
count = 1
# LOGS.info(f"page : {_format.paste_text(page)}")
image_objects = self._get_image_objects(page)
while count < limit + 1:
            if i >= len(image_objects):  # no (more) results to process
print("no_links")
break
else:
# format the item for readability
object = self.format_object(image_objects[i])
if arguments["metadata"] and not arguments["silent_mode"]:
print("\nImage Metadata: " + str(object))
# download the images
(
download_status,
download_message,
return_image_name,
absolute_path,
) = self.download_image(
object["image_link"],
object["image_format"],
main_directory,
dir_name,
count,
arguments["print_urls"],
arguments["socket_timeout"],
arguments["prefix"],
arguments["print_size"],
arguments["no_numbering"],
arguments["no_download"],
arguments["save_source"],
object["image_source"],
arguments["silent_mode"],
arguments["thumbnail_only"],
arguments["format"],
arguments["ignore_urls"],
)
if not arguments["silent_mode"]:
print(download_message)
if download_status == "success":
# download image_thumbnails
if arguments["thumbnail"] or arguments["thumbnail_only"]:
(
download_status,
download_message_thumbnail,
) = self.download_image_thumbnail(
object["image_thumbnail_url"],
main_directory,
dir_name,
return_image_name,
arguments["print_urls"],
arguments["socket_timeout"],
arguments["print_size"],
arguments["no_download"],
arguments["save_source"],
object["image_source"],
arguments["ignore_urls"],
)
if not arguments["silent_mode"]:
print(download_message_thumbnail)
count += 1
object["image_filename"] = return_image_name
# Append all the links in the list named 'Links'
items.append(object)
abs_path.append(absolute_path)
else:
errorCount += 1
# delay param
if arguments["delay"]:
time.sleep(int(arguments["delay"]))
i += 1
        if count < limit:
            print("\n\nUnfortunately, only " +
                  str(count - 1) +
                  " of the requested " +
                  str(limit) +
                  " images could be downloaded, because the rest were not "
                  "downloadable with this search filter!")
return items, errorCount, abs_path
# Bulk Download
def download(self, arguments):
paths_agg = {}
# for input coming from other python files
if __name__ != "__main__":
# if the calling file contains config_file param
if "config_file" in arguments:
records = []
                with open(arguments["config_file"]) as config_f:
                    json_file = json.load(config_f)
for item in json_file["Records"]:
arguments = {}
for i in args_list:
arguments[i] = None
for key, value in item.items():
arguments[key] = value
records.append(arguments)
total_errors = 0
for rec in records:
paths, errors = self.download_executor(rec)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"] and arguments["print_paths"]:
                        print(
                            str(paths).encode("raw_unicode_escape").decode("utf-8"))
total_errors += errors
return paths_agg, total_errors
# if the calling file contains params directly
paths, errors = self.download_executor(arguments)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"] and arguments["print_paths"]:
                print(str(paths).encode("raw_unicode_escape").decode("utf-8"))
return paths_agg, errors
# for input coming from CLI
paths, errors = self.download_executor(arguments)
for i in paths:
paths_agg[i] = paths[i]
if not arguments["silent_mode"] and arguments["print_paths"]:
            print(str(paths).encode("raw_unicode_escape").decode("utf-8"))
return paths_agg, errors
def download_executor(self, arguments):
paths = {}
errorCount = None
for arg in args_list:
if arg not in arguments:
arguments[arg] = None
# Initialization and Validation of user arguments
if arguments["keywords"]:
search_keyword = [str(item)
for item in arguments["keywords"].split(",")]
if arguments["keywords_from_file"]:
search_keyword = self.keywords_from_file(
arguments["keywords_from_file"])
# both time and time range should not be allowed in the same query
if arguments["time"] and arguments["time_range"]:
raise ValueError(
"Either time or time range should be used in a query. Both cannot be used at the same time."
)
# both time and time range should not be allowed in the same query
if arguments["size"] and arguments["exact_size"]:
raise ValueError(
'Either "size" or "exact_size" should be used in a query. Both cannot be used at the same time.'
)
# both image directory and no image directory should not be allowed in
# the same query
if arguments["image_directory"] and arguments["no_directory"]:
raise ValueError(
"You can either specify image directory or specify no image directory, not both!"
)
# Additional words added to keywords
if arguments["suffix_keywords"]:
suffix_keywords = [
" " + str(sk) for sk in arguments["suffix_keywords"].split(",")
]
else:
suffix_keywords = [""]
# Additional words added to keywords
if arguments["prefix_keywords"]:
prefix_keywords = [
str(sk) + " " for sk in arguments["prefix_keywords"].split(",")
]
else:
prefix_keywords = [""]
# Setting limit on number of images to be downloaded
limit = int(arguments["limit"]) if arguments["limit"] else 100
if arguments["url"]:
current_time = str(datetime.datetime.now()).split(".")[0]
search_keyword = [current_time.replace(":", "_")]
if arguments["similar_images"]:
current_time = str(datetime.datetime.now()).split(".")[0]
search_keyword = [current_time.replace(":", "_")]
# If single_image or url argument not present then keywords is
# mandatory argument
if (
arguments["single_image"] is None
and arguments["url"] is None
and arguments["similar_images"] is None
and arguments["keywords"] is None
and arguments["keywords_from_file"] is None
):
print(
"-------------------------------\n"
"Uh oh! Keywords is a required argument \n\n"
"Please refer to the documentation on guide to writing queries \n"
"https://github.com/hardikvasa/google-images-download#examples"
"\n\nexiting!\n"
"-------------------------------")
sys.exit()
# If this argument is present, set the custom output directory
if arguments["output_directory"]:
main_directory = arguments["output_directory"]
else:
main_directory = "downloads"
# Proxy settings
if arguments["proxy"]:
os.environ["http_proxy"] = arguments["proxy"]
os.environ["https_proxy"] = arguments["proxy"]
# Initialization Complete
total_errors = 0
for pky in prefix_keywords: # 1.for every prefix keywords
for sky in suffix_keywords: # 2.for every suffix keywords
for i in range(
len(search_keyword)): # 3.for every main keyword
iteration = (
"\n"
+ "Item no.: "
+ str(i + 1)
+ " -->"
+ " Item name = "
+ (pky)
+ (search_keyword[i])
+ (sky)
)
if arguments["silent_mode"]:
print(
"Downloading images for: "
+ (pky)
+ (search_keyword[i])
+ (sky)
+ " ..."
)
else:
print(
iteration.encode("raw_unicode_escape").decode("utf-8"))
print("Evaluating...")
search_term = pky + search_keyword[i] + sky
if arguments["image_directory"]:
dir_name = arguments["image_directory"]
elif arguments["no_directory"]:
dir_name = ""
else:
dir_name = search_term + (
"-" + arguments["color"] if arguments["color"] else ""
) # sub-directory
if not arguments["no_download"]:
self.create_directories(
main_directory,
dir_name,
arguments["thumbnail"],
arguments["thumbnail_only"],
) # create directories in OS
params = self.build_url_parameters(
arguments
) # building URL with params
url = self.build_search_url(
search_term,
params,
arguments["url"],
arguments["similar_images"],
arguments["specific_site"],
arguments["safe_search"],
) # building main search url
if limit < 101:
raw_html = self.download_page(url) # download page
else:
raw_html = self.download_extended_page(
url, arguments["chromedriver"]
)
if not arguments["silent_mode"]:
if arguments["no_download"]:
print("Getting URLs without downloading images...")
else:
print("Starting Download...")
items, errorCount, abs_path = self._get_all_items(
raw_html, main_directory, dir_name, limit, arguments
) # get all image items and download images
paths[pky + search_keyword[i] + sky] = abs_path
# dumps into a json file
if arguments["extract_metadata"]:
try:
if not os.path.exists("logs"):
os.makedirs("logs")
except OSError as e:
print(e)
with open(
"logs/" + search_keyword[i] + ".json", "w"
) as json_file:
json.dump(
items, json_file, indent=4, sort_keys=True)
# Related images
if arguments["related_images"]:
print(
"\nGetting list of related keywords...this may take a few moments"
)
tabs = self.get_all_tabs(raw_html)
for key, value in tabs.items():
final_search_term = search_term + " - " + key
print("\nNow Downloading - " + final_search_term)
if limit < 101:
new_raw_html = self.download_page(
value
) # download page
else:
new_raw_html = self.download_extended_page(
value, arguments["chromedriver"]
)
self.create_directories(
main_directory,
final_search_term,
arguments["thumbnail"],
arguments["thumbnail_only"],
)
self._get_all_items(
new_raw_html,
main_directory,
search_term + " - " + key,
limit,
arguments,
)
total_errors += errorCount
if not arguments["silent_mode"]:
print("\nErrors: " + str(errorCount) + "\n")
return paths, total_errors
# ------------- Main Program -------------#
def main():
records = user_input()
total_errors = 0
t0 = time.time() # start the timer
for arguments in records:
if arguments["single_image"]: # Download Single Image using a URL
response = googleimagesdownload()
response.single_image(arguments["single_image"])
else: # or download multiple images based on keywords/keyphrase search
response = googleimagesdownload()
# wrapping response in a variable just for consistency
paths, errors = response.download(arguments)
total_errors += errors
t1 = time.time() # stop the timer
    # total time taken to crawl, find and download all the image links
total_time = t1 - t0
if not arguments["silent_mode"]:
print("\nEverything downloaded!")
print("Total errors: " + str(total_errors))
print("Total time taken: " + str(total_time) + " Seconds")
if __name__ == "__main__":
main()
| 35.948083 | 147 | 0.454326 |
f89e8cc4483d2b78cea7b7e4b095c7c0ae6b31de | 617 | py | Python | day_16/solution.py | juntuu/advent_of_code_2016 | c2849396b4bd7b0f0e9128624fcec20130bc243f | [
"MIT"
] | null | null | null | day_16/solution.py | juntuu/advent_of_code_2016 | c2849396b4bd7b0f0e9128624fcec20130bc243f | [
"MIT"
] | null | null | null | day_16/solution.py | juntuu/advent_of_code_2016 | c2849396b4bd7b0f0e9128624fcec20130bc243f | [
"MIT"
] | null | null | null | def double(a):
b = a[::-1]
return a + "0" + b.translate({ord("1"): "0", ord("0"): "1"})
def check_sum(s):
while len(s) % 2 == 0:
s = "".join("01"[a == b] for a, b in zip(*[iter(s)]*2))
return s
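# Worked sketch of the two helpers above, using what should be the well-known
# puzzle examples (treat the literals as assumptions):
#   double("1")                 -> "100"
#   double("111100001010")      -> "1111000010100101011110000"
#   check_sum("110010110100")   -> "100"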
def part1(state):
n = 272
while len(state) < n:
state = double(state)
return check_sum(state[:n])
def part2(state):
n = 35651584
while len(state) < n:
state = double(state)
return check_sum(state[:n])
def main(_=None):
print("Day 16")
state = "01111010110010011"
A = part1(state)
print(f"{A=}")
B = part2(state)
print(f"{B=}")
| 18.69697 | 64 | 0.515397 |
f1ae123250321e4f09b4181a4e585e6913d877c7 | 10,198 | py | Python | python/pyspark/ml/base.py | rahij/spark | 82721ce00b6cf535abd3d9cd66445e452554d15d | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-09-16T09:42:07.000Z | 2019-10-29T20:38:27.000Z | python/pyspark/ml/base.py | rahij/spark | 82721ce00b6cf535abd3d9cd66445e452554d15d | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 118 | 2019-06-28T12:58:34.000Z | 2021-08-03T04:43:57.000Z | python/pyspark/ml/base.py | rahij/spark | 82721ce00b6cf535abd3d9cd66445e452554d15d | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2021-10-30T17:13:49.000Z | 2022-03-13T22:26:29.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod, abstractproperty
import copy
import threading
from pyspark import since
from pyspark.ml.common import inherit_doc
from pyspark.ml.param.shared import HasInputCol, HasOutputCol, HasLabelCol, HasFeaturesCol, \
HasPredictionCol, Params
from pyspark.sql.functions import udf
from pyspark.sql.types import StructField, StructType
class _FitMultipleIterator(object):
"""
Used by default implementation of Estimator.fitMultiple to produce models in a thread safe
iterator. This class handles the simple case of fitMultiple where each param map should be
fit independently.
:param fitSingleModel: Function: (int => Model) which fits an estimator to a dataset.
`fitSingleModel` may be called up to `numModels` times, with a unique index each time.
Each call to `fitSingleModel` with an index should return the Model associated with
that index.
    :param numModels: Number of models this iterator should produce.
See Estimator.fitMultiple for more info.
"""
def __init__(self, fitSingleModel, numModels):
"""
"""
self.fitSingleModel = fitSingleModel
        self.numModels = numModels
self.counter = 0
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
index = self.counter
            if index >= self.numModels:
raise StopIteration("No models remaining.")
self.counter += 1
return index, self.fitSingleModel(index)
def next(self):
"""For python2 compatibility."""
return self.__next__()
@inherit_doc
class Estimator(Params, metaclass=ABCMeta):
"""
Abstract class for estimators that fit models to data.
.. versionadded:: 1.3.0
"""
pass
@abstractmethod
def _fit(self, dataset):
"""
Fits a model to the input dataset. This is called by the default implementation of fit.
:param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
:returns: fitted model
"""
raise NotImplementedError()
@since("2.3.0")
def fitMultiple(self, dataset, paramMaps):
"""
Fits a model to the input dataset for each param map in `paramMaps`.
:param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`.
:param paramMaps: A Sequence of param maps.
:return: A thread safe iterable which contains one model for each param map. Each
call to `next(modelIterator)` will return `(index, model)` where model was fit
using `paramMaps[index]`. `index` values may not be sequential.
"""
estimator = self.copy()
def fitSingleModel(index):
return estimator.fit(dataset, paramMaps[index])
return _FitMultipleIterator(fitSingleModel, len(paramMaps))
@since("1.3.0")
def fit(self, dataset, params=None):
"""
Fits a model to the input dataset with optional parameters.
:param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
:param params: an optional param map that overrides embedded params. If a list/tuple of
param maps is given, this calls fit on each param map and returns a list of
models.
:returns: fitted model(s)
"""
if params is None:
params = dict()
if isinstance(params, (list, tuple)):
models = [None] * len(params)
for index, model in self.fitMultiple(dataset, params):
models[index] = model
return models
elif isinstance(params, dict):
if params:
return self.copy(params)._fit(dataset)
else:
return self._fit(dataset)
else:
raise ValueError("Params must be either a param map or a list/tuple of param maps, "
"but got %s." % type(params))
@inherit_doc
class Transformer(Params, metaclass=ABCMeta):
"""
Abstract class for transformers that transform one dataset into another.
.. versionadded:: 1.3.0
"""
pass
@abstractmethod
def _transform(self, dataset):
"""
Transforms the input dataset.
:param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
:returns: transformed dataset
"""
raise NotImplementedError()
@since("1.3.0")
def transform(self, dataset, params=None):
"""
Transforms the input dataset with optional parameters.
:param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
:param params: an optional param map that overrides embedded params.
:returns: transformed dataset
"""
if params is None:
params = dict()
if isinstance(params, dict):
if params:
return self.copy(params)._transform(dataset)
else:
return self._transform(dataset)
else:
raise ValueError("Params must be a param map but got %s." % type(params))
@inherit_doc
class Model(Transformer, metaclass=ABCMeta):
"""
Abstract class for models that are fitted by estimators.
.. versionadded:: 1.4.0
"""
pass
@inherit_doc
class UnaryTransformer(HasInputCol, HasOutputCol, Transformer):
"""
Abstract class for transformers that take one input column, apply transformation,
and output the result as a new column.
.. versionadded:: 2.3.0
"""
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
@abstractmethod
def createTransformFunc(self):
"""
Creates the transform function using the given param map. The input param map already takes
account of the embedded param map. So the param values should be determined
solely by the input param map.
"""
raise NotImplementedError()
@abstractmethod
def outputDataType(self):
"""
Returns the data type of the output column.
"""
raise NotImplementedError()
@abstractmethod
def validateInputType(self, inputType):
"""
Validates the input type. Throw an exception if it is invalid.
"""
raise NotImplementedError()
def transformSchema(self, schema):
inputType = schema[self.getInputCol()].dataType
self.validateInputType(inputType)
if self.getOutputCol() in schema.names:
raise ValueError("Output column %s already exists." % self.getOutputCol())
outputFields = copy.copy(schema.fields)
outputFields.append(StructField(self.getOutputCol(),
self.outputDataType(),
nullable=False))
return StructType(outputFields)
def _transform(self, dataset):
self.transformSchema(dataset.schema)
transformUDF = udf(self.createTransformFunc(), self.outputDataType())
transformedDataset = dataset.withColumn(self.getOutputCol(),
transformUDF(dataset[self.getInputCol()]))
return transformedDataset
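# Minimal subclass sketch for UnaryTransformer (illustrative only; assumes
# IntegerType/StringType imported from pyspark.sql.types):
#
#   class StringLength(UnaryTransformer):
#       def createTransformFunc(self):
#           return lambda s: len(s)
#       def outputDataType(self):
#           return IntegerType()
#       def validateInputType(self, inputType):
#           if inputType != StringType():
#               raise TypeError("input column must be StringType")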
@inherit_doc
class _PredictorParams(HasLabelCol, HasFeaturesCol, HasPredictionCol):
"""
Params for :py:class:`Predictor` and :py:class:`PredictorModel`.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class Predictor(Estimator, _PredictorParams, metaclass=ABCMeta):
"""
Estimator for prediction tasks (regression and classification).
"""
@since("3.0.0")
def setLabelCol(self, value):
"""
Sets the value of :py:attr:`labelCol`.
"""
return self._set(labelCol=value)
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@inherit_doc
class PredictionModel(Model, _PredictorParams, metaclass=ABCMeta):
"""
Model for prediction tasks (regression and classification).
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@abstractproperty
@since("2.1.0")
def numFeatures(self):
"""
Returns the number of features the model was trained on. If unknown, returns -1
"""
raise NotImplementedError()
@abstractmethod
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
raise NotImplementedError()
| 31.86875 | 99 | 0.632281 |
8f99ed94c040dfb0b3464a043dd89679d47d3902 | 1,408 | py | Python | examples/restarts.py | jmmshn/jobflow | 996f77378915271ef8393659992249990b1ac234 | [
"BSD-3-Clause-LBNL"
] | null | null | null | examples/restarts.py | jmmshn/jobflow | 996f77378915271ef8393659992249990b1ac234 | [
"BSD-3-Clause-LBNL"
] | null | null | null | examples/restarts.py | jmmshn/jobflow | 996f77378915271ef8393659992249990b1ac234 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from typing import List
from jobflow import Flow, job, run_locally
@job
def read_websites():
from pathlib import Path
file_contents = Path("websites.txt").read_text()
return file_contents.split()
@job
def time_website(website: str):
import urllib.request
from time import perf_counter
stream = urllib.request.urlopen(website)
start_time = perf_counter()
stream.read()
end_time = perf_counter()
stream.close()
return end_time - start_time
@job
def start_timing_jobs(websites: List[str]):
from jobflow.core.job import Response
jobs = []
for website in websites:
time_job = time_website(website)
time_job.name = f"time {website}"
jobs.append(time_job)
output = [j.output for j in jobs]
return Response(replace=Flow(jobs, output))
@job
def sum_times(times: List[float]):
return sum(times)
# create a flow that will:
# 1. load a list of websites from a file
# 2. generate one new job for each website to time the website loading
# 3. sum all the times together
read_websites_job = read_websites()
timings_job = start_timing_jobs(read_websites_job.output)
sum_job = sum_times(timings_job.output)
flow = Flow([read_websites_job, timings_job, sum_job])
# draw the flow graph
flow.draw_graph().show()
# run the flow, "responses" contains the output of all jobs
responses = run_locally(flow)
print(responses)
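# Hedged note: run_locally returns a dict keyed by job uuid (and, per uuid, by
# launch index), so the final sum could be read back with something like
#
#   total = responses[sum_job.uuid][1].output
#
# assuming the 1-based launch index used elsewhere in jobflow.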
| 23.081967 | 70 | 0.71946 |
bc4755cb302f50e60af92b4372a0f59ae971a7c0 | 711 | py | Python | Clustering_Snippets/T8_getClusters_spectral.py | mahnooranjum/Python_Programming | ba251e0e855842112efeb968d06458c60eaf1bd3 | [
"MIT"
] | null | null | null | Clustering_Snippets/T8_getClusters_spectral.py | mahnooranjum/Python_Programming | ba251e0e855842112efeb968d06458c60eaf1bd3 | [
"MIT"
] | null | null | null | Clustering_Snippets/T8_getClusters_spectral.py | mahnooranjum/Python_Programming | ba251e0e855842112efeb968d06458c60eaf1bd3 | [
"MIT"
] | null | null | null | '''
Mahnoor Anjum
Python:
Clusters
'''
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
import random
filename = 'private'
path = 'data/' + filename + '.txt'
data = pd.read_csv(path, delimiter='\t')
columns = ['X','Y','Z']
X = data[columns]
data.isnull().sum()
from sklearn.preprocessing import MinMaxScaler
obj = MinMaxScaler()
X_scaled = obj.fit_transform(X)
from sklearn.cluster import SpectralClustering
model = SpectralClustering(n_clusters = 500)
y_pred = model.fit_predict(X_scaled)
clusters = np.unique(y_pred)
data = X.join(pd.DataFrame({'Cluster':y_pred}))
data.to_csv('data/clusters/' + filename + '.csv', index=None)
| 20.314286 | 62 | 0.714487 |
ef20e75711ae99e5183457f377ac6fac80f5cb29 | 4,046 | py | Python | examples/SpinBox.py | Tillsten/pyqtgraph | 0045863165fe526988c58cf4f8232ae2d261a5ee | [
"MIT"
] | null | null | null | examples/SpinBox.py | Tillsten/pyqtgraph | 0045863165fe526988c58cf4f8232ae2d261a5ee | [
"MIT"
] | null | null | null | examples/SpinBox.py | Tillsten/pyqtgraph | 0045863165fe526988c58cf4f8232ae2d261a5ee | [
"MIT"
] | 1 | 2021-04-10T06:04:47.000Z | 2021-04-10T06:04:47.000Z | # -*- coding: utf-8 -*-
"""
This example demonstrates the SpinBox widget, which is an extension of
QDoubleSpinBox providing some advanced features:
* SI-prefixed units
* Non-linear stepping modes
* Bounded/unbounded values
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
app = QtGui.QApplication([])
spins = [
("Floating-point spin box, min=0, no maximum.", pg.SpinBox(value=5.0, bounds=[0, None])),
("Integer spin box, dec stepping<br>(1-9, 10-90, 100-900, etc)", pg.SpinBox(value=10, int=True, dec=True, minStep=1, step=1)),
("Float with SI-prefixed units<br>(n, u, m, k, M, etc)", pg.SpinBox(value=0.9, suffix='V', siPrefix=True)),
("Float with SI-prefixed units,<br>dec step=0.1, minStep=0.1", pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=0.1, minStep=0.1)),
("Float with SI-prefixed units,<br>dec step=0.5, minStep=0.01", pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=0.5, minStep=0.01)),
("Float with SI-prefixed units,<br>dec step=1.0, minStep=0.001", pg.SpinBox(value=1.0, suffix='V', siPrefix=True, dec=True, step=1.0, minStep=0.001)),
]
win = QtGui.QMainWindow()
win.setWindowTitle('pyqtgraph example: SpinBox')
cw = QtGui.QWidget()
layout = QtGui.QGridLayout()
cw.setLayout(layout)
win.setCentralWidget(cw)
win.show()
#win.resize(300, 600)
changingLabel = QtGui.QLabel() ## updated immediately
changedLabel = QtGui.QLabel() ## updated only when editing is finished or mouse wheel has stopped for 0.3sec
changingLabel.setMinimumWidth(200)
font = changingLabel.font()
font.setBold(True)
font.setPointSize(14)
changingLabel.setFont(font)
changedLabel.setFont(font)
labels = []
def valueChanged(sb):
changedLabel.setText("Final value: %s" % str(sb.value()))
def valueChanging(sb, value):
changingLabel.setText("Value changing: %s" % str(sb.value()))
for text, spin in spins:
label = QtGui.QLabel(text)
labels.append(label)
layout.addWidget(label)
layout.addWidget(spin)
spin.sigValueChanged.connect(valueChanged)
spin.sigValueChanging.connect(valueChanging)
layout.addWidget(changingLabel, 0, 1)
layout.addWidget(changedLabel, 2, 1)
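# Hedged aside: the boxes can also be driven programmatically, e.g.
# spins[0][1].setValue(2.5) should update the first spin box and fire
# sigValueChanged just like a user edit (assuming default signal wiring).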
#def mkWin():
#win = QtGui.QMainWindow()
#g = QtGui.QFormLayout()
#w = QtGui.QWidget()
#w.setLayout(g)
#win.setCentralWidget(w)
#s1 = SpinBox(value=5, step=0.1, bounds=[-1.5, None], suffix='units')
#t1 = QtGui.QLineEdit()
#g.addRow(s1, t1)
#s2 = SpinBox(value=10e-6, dec=True, step=0.1, minStep=1e-6, suffix='A', siPrefix=True)
#t2 = QtGui.QLineEdit()
#g.addRow(s2, t2)
#s3 = SpinBox(value=1000, dec=True, step=0.5, minStep=1e-6, bounds=[1, 1e9], suffix='Hz', siPrefix=True)
#t3 = QtGui.QLineEdit()
#g.addRow(s3, t3)
#s4 = SpinBox(int=True, dec=True, step=1, minStep=1, bounds=[-10, 1000])
#t4 = QtGui.QLineEdit()
#g.addRow(s4, t4)
#win.show()
#import sys
#for sb in [s1, s2, s3,s4]:
##QtCore.QObject.connect(sb, QtCore.SIGNAL('valueChanged(double)'), lambda v: sys.stdout.write(str(sb) + " valueChanged\n"))
##QtCore.QObject.connect(sb, QtCore.SIGNAL('editingFinished()'), lambda: sys.stdout.write(str(sb) + " editingFinished\n"))
#sb.sigValueChanged.connect(valueChanged)
#sb.sigValueChanging.connect(valueChanging)
#sb.editingFinished.connect(lambda: sys.stdout.write(str(sb) + " editingFinished\n"))
#return win, w, [s1, s2, s3, s4]
#a = mkWin()
#def test(n=100):
#for i in range(n):
#win, w, sb = mkWin()
#for s in sb:
#w.setParent(None)
#s.setParent(None)
#s.valueChanged.disconnect()
#s.editingFinished.disconnect()
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| 34.581197 | 154 | 0.668562 |
4619af7bea18b05a13fecfe9f25482691e79a65f | 6,464 | py | Python | cloudshell/devices/standards/load_balancing/configuration_attributes_structure.py | QualiSystems/cloudshell-networking-devices | f316cefca174975424ec21854b672335feaf8f87 | [
"Apache-2.0"
] | null | null | null | cloudshell/devices/standards/load_balancing/configuration_attributes_structure.py | QualiSystems/cloudshell-networking-devices | f316cefca174975424ec21854b672335feaf8f87 | [
"Apache-2.0"
] | 34 | 2016-11-28T10:52:44.000Z | 2019-10-01T08:52:59.000Z | cloudshell/devices/standards/load_balancing/configuration_attributes_structure.py | QualiSystems/cloudshell-networking-devices | f316cefca174975424ec21854b672335feaf8f87 | [
"Apache-2.0"
] | 1 | 2017-05-23T08:46:45.000Z | 2017-05-23T08:46:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
class GenericNetworkingResource(object):
def __init__(self, shell_name=None, name=None, supported_os=None):
"""Init method
:param shell_name: Shell Name
:type shell_name: str
:param name: Resource Name
:type name: str
:param supported_os: list of supported OS
:type supported_os: list
"""
self.attributes = {}
self.shell_name = shell_name
self.name = name
self.supported_os = supported_os
self.fullname = None
self.address = None # The IP address of the resource
self.family = None # The resource family
if shell_name:
self.namespace_prefix = "{}.".format(self.shell_name)
else:
self.namespace_prefix = ""
@property
def backup_location(self):
"""
:rtype: str
"""
return self.attributes.get("{}Backup Location".format(self.namespace_prefix), None)
@property
def backup_type(self):
"""
:rtype: str
"""
return self.attributes.get("{}Backup Type".format(self.namespace_prefix), None)
@property
def backup_user(self):
"""
:rtype: str
"""
return self.attributes.get("{}Backup User".format(self.namespace_prefix), None)
@property
def backup_password(self):
"""
:rtype: string
"""
return self.attributes.get("{}Backup Password".format(self.namespace_prefix), None)
@property
def vrf_management_name(self):
"""
:rtype: str
"""
return self.attributes.get("{}VRF Management Name".format(self.namespace_prefix), None)
@property
def user(self):
"""
:rtype: str
"""
return self.attributes.get("{}User".format(self.namespace_prefix), None)
@property
def password(self):
"""
        :rtype: str
"""
return self.attributes.get("{}Password".format(self.namespace_prefix), None)
@property
def enable_password(self):
"""
:rtype: str
"""
return self.attributes.get("{}Enable Password".format(self.namespace_prefix), None)
@property
def power_management(self):
"""
:rtype: bool
"""
return self.attributes.get("{}Power Management".format(self.namespace_prefix), None)
@property
def sessions_concurrency_limit(self):
"""
:rtype: float
"""
return self.attributes.get("{}Sessions Concurrency Limit".format(self.namespace_prefix), None)
@property
def snmp_read_community(self):
"""
:rtype: str
"""
return self.attributes.get("{}SNMP Read Community".format(self.namespace_prefix), None)
@property
def snmp_write_community(self):
"""
:rtype: str
"""
return self.attributes.get("{}SNMP Write Community".format(self.namespace_prefix), None)
@property
def snmp_v3_user(self):
"""
:rtype: str
"""
return self.attributes.get("{}SNMP V3 User".format(self.namespace_prefix), None)
@property
def snmp_v3_password(self):
"""
        :rtype: str
"""
return self.attributes.get("{}SNMP V3 Password".format(self.namespace_prefix), None)
@property
def snmp_v3_private_key(self):
"""
:rtype: str
"""
return self.attributes.get("{}SNMP V3 Private Key".format(self.namespace_prefix), None)
@property
def snmp_v3_auth_protocol(self):
"""
:rtype: str
"""
return self.attributes.get("{}SNMP V3 Authentication Protocol".format(self.namespace_prefix), None)
@property
def snmp_v3_priv_protocol(self):
"""
:rtype: str
"""
return self.attributes.get("{}SNMP V3 Privacy Protocol".format(self.namespace_prefix), None)
@property
def snmp_version(self):
"""
:rtype: str
"""
return self.attributes.get("{}SNMP Version".format(self.namespace_prefix), None)
@property
def enable_snmp(self):
"""
:rtype: bool
"""
return self.attributes.get("{}Enable SNMP".format(self.namespace_prefix), None)
@property
def disable_snmp(self):
"""
:rtype: bool
"""
return self.attributes.get("{}Disable SNMP".format(self.namespace_prefix), None)
@property
def console_server_ip_address(self):
"""
:rtype: str
"""
return self.attributes.get("{}Console Server IP Address".format(self.namespace_prefix), None)
@property
def console_user(self):
"""
:rtype: str
"""
return self.attributes.get("{}Console User".format(self.namespace_prefix), None)
@property
def console_port(self):
"""
:rtype: float
"""
return self.attributes.get("{}Console Port".format(self.namespace_prefix), None)
@property
def console_password(self):
"""
        :rtype: str
"""
return self.attributes.get("{}Console Password".format(self.namespace_prefix), None)
@property
def cli_connection_type(self):
"""
:rtype: str
"""
return self.attributes.get("{}CLI Connection Type".format(self.namespace_prefix), None)
@property
def cli_tcp_port(self):
"""
:rtype: str
"""
return self.attributes.get("{}CLI TCP Port".format(self.namespace_prefix), None)
def create_load_balancing_resource_from_context(shell_name, supported_os, context):
"""Creates an instance of Networking Resource by given context
:param shell_name: Shell Name
:type shell_name: str
:param supported_os: list of supported OS
:type supported_os: list
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype GenericNetworkingResource
"""
result = GenericNetworkingResource(shell_name=shell_name, name=context.resource.name, supported_os=supported_os)
result.address = context.resource.address
result.family = context.resource.family
result.fullname = context.resource.fullname
result.attributes = dict(context.resource.attributes)
return result
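# Hedged usage sketch (a real context arrives from CloudShell; the literals
# below are stand-ins):
#
#   resource = create_load_balancing_resource_from_context(
#       shell_name="MyLoadBalancerShell",
#       supported_os=["SomeOS"],
#       context=command_context,  # a ResourceCommandContext instance
#   )
#   resource.user, resource.snmp_version  # resolved via the properties above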
| 25.151751 | 116 | 0.604115 |
f27c1c8afb5471184ffa42708d2326ae53d0aae1 | 3,528 | py | Python | flexget/plugins/urlrewrite_newzleech.py | RSully/flexget-flexget | ab36590e569511a43c1e35b1dfae9b7fb8db1535 | [
"MIT"
] | null | null | null | flexget/plugins/urlrewrite_newzleech.py | RSully/flexget-flexget | ab36590e569511a43c1e35b1dfae9b7fb8db1535 | [
"MIT"
] | null | null | null | flexget/plugins/urlrewrite_newzleech.py | RSully/flexget-flexget | ab36590e569511a43c1e35b1dfae9b7fb8db1535 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
import urllib
import urllib2
import logging
import re
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.soup import get_soup
from flexget.utils.tools import urlopener
log = logging.getLogger("newzleech")
class UrlRewriteNewzleech(object):
"""
UrlRewriter or search by using newzleech.com
TODO: implement basic url rewriting
"""
# Search API
@plugin.internet(log)
def search(self, task, entry, config=None):
txheaders = {
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-us,en;q=0.5',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive': '300',
'Connection': 'keep-alive',
}
nzbs = set()
for search_string in entry.get('search_strings', [entry['title']]):
            query = search_string  # use each search string, not just the entry title
url = u'http://newzleech.com/?%s' % str(urllib.urlencode({'q': query.encode('latin1'),
'm': 'search', 'group': '', 'min': 'min',
'max': 'max', 'age': '', 'minage': '', 'adv': ''}))
#log.debug('Search url: %s' % url)
req = urllib2.Request(url, headers=txheaders)
page = urlopener(req, log)
soup = get_soup(page)
for item in soup.find_all('table', attrs={'class': 'contentt'}):
subject_tag = item.find('td', attrs={'class': 'subject'}).next
subject = ''.join(subject_tag.find_all(text=True))
complete = item.find('td', attrs={'class': 'complete'}).contents[0]
size = item.find('td', attrs={'class': 'size'}).contents[0]
nzb_url = 'http://newzleech.com/' + item.find('td', attrs={'class': 'get'}).next.get('href')
# generate regexp from entry title and see if it matches subject
regexp = query
wildcardize = [' ', '-']
for wild in wildcardize:
regexp = regexp.replace(wild, '.')
regexp = '.*' + regexp + '.*'
#log.debug('Title regexp: %s' % regexp)
if re.match(regexp, subject):
log.debug('%s matches to regexp' % subject)
if complete != u'100':
log.debug('Match is incomplete %s from newzleech, skipping ..' % query)
continue
log.info('Found \'%s\'' % query)
try:
size_num = float(size[:-3])
except (ValueError, TypeError):
log.error('Failed to parse_size %s' % size)
size_num = 0
# convert into megabytes
if 'GB' in size:
size_num *= 1024
if 'KB' in size:
size_num /= 1024
# choose largest file
nzbs.add(Entry(title=subject, url=nzb_url, content_size=size_num, search_sort=size_num))
return nzbs
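# Worked sketch of the wildcard trick inside search(): a query such as
# "Some Show - S01E01" becomes the pattern ".*Some.Show...S01E01.*", so spaces
# and dashes in the usenet subject line still match.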
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteNewzleech, 'newzleech', groups=['search'], api_ver=2)
| 40.090909 | 121 | 0.508787 |
99c614f63fead0fcd08f28725b11055a21aef97e | 4,265 | py | Python | flax/wallet/util/backup_utils.py | ReadyNeutron/shitcoin-blockchain | 80add4e545ad22a317244f7fd958d118a5a75c5d | [
"Apache-2.0"
] | 174 | 2021-06-16T17:49:22.000Z | 2022-03-17T03:03:17.000Z | flax/wallet/util/backup_utils.py | ReadyNeutron/shitcoin-blockchain | 80add4e545ad22a317244f7fd958d118a5a75c5d | [
"Apache-2.0"
] | 49 | 2021-06-17T14:10:53.000Z | 2022-01-31T11:04:21.000Z | flax/wallet/util/backup_utils.py | ReadyNeutron/shitcoin-blockchain | 80add4e545ad22a317244f7fd958d118a5a75c5d | [
"Apache-2.0"
] | 80 | 2021-06-17T14:23:31.000Z | 2022-02-24T05:52:47.000Z | import base64
import json
from typing import Any
import aiohttp
from blspy import AugSchemeMPL, PrivateKey, PublicKeyMPL, SignatureMPL
from cryptography.fernet import Fernet
from flax.server.server import ssl_context_for_root
from flax.ssl.create_ssl import get_mozilla_ca_crt
from flax.util.byte_types import hexstr_to_bytes
from flax.util.hash import std_hash
from flax.wallet.derive_keys import master_sk_to_backup_sk
from flax.wallet.util.wallet_types import WalletType
def open_backup_file(file_path, private_key):
backup_file_text = file_path.read_text()
backup_file_json = json.loads(backup_file_text)
meta_data = backup_file_json["meta_data"]
meta_data_bytes = json.dumps(meta_data).encode()
sig = backup_file_json["signature"]
backup_pk = master_sk_to_backup_sk(private_key)
my_pubkey = backup_pk.get_g1()
key_base_64 = base64.b64encode(bytes(backup_pk))
f = Fernet(key_base_64)
encrypted_data = backup_file_json["data"].encode()
msg = std_hash(encrypted_data) + std_hash(meta_data_bytes)
signature = SignatureMPL.from_bytes(hexstr_to_bytes(sig))
pubkey = PublicKeyMPL.from_bytes(hexstr_to_bytes(meta_data["pubkey"]))
sig_match_my = AugSchemeMPL.verify(my_pubkey, msg, signature)
sig_match_backup = AugSchemeMPL.verify(pubkey, msg, signature)
assert sig_match_my is True
assert sig_match_backup is True
data_bytes = f.decrypt(encrypted_data)
data_text = data_bytes.decode()
data_json = json.loads(data_text)
unencrypted = {}
unencrypted["data"] = data_json
unencrypted["meta_data"] = meta_data
return unencrypted
def get_backup_info(file_path, private_key):
json_dict = open_backup_file(file_path, private_key)
data = json_dict["data"]
wallet_list_json = data["wallet_list"]
info_dict = {}
wallets = []
for wallet_info in wallet_list_json:
wallet = {}
wallet["name"] = wallet_info["name"]
wallet["type"] = wallet_info["type"]
wallet["type_name"] = WalletType(wallet_info["type"]).name
wallet["id"] = wallet_info["id"]
wallet["data"] = wallet_info["data"]
wallets.append(wallet)
info_dict["version"] = data["version"]
info_dict["fingerprint"] = data["fingerprint"]
info_dict["timestamp"] = data["timestamp"]
info_dict["wallets"] = wallets
return info_dict
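# Hedged usage sketch (a real PrivateKey comes from the keychain; the path is
# illustrative and assumes pathlib.Path, since file_path.read_text() is used):
#
#   info = get_backup_info(Path("backup.flax"), private_key)
#   print(info["fingerprint"], [w["name"] for w in info["wallets"]])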
async def post(session: aiohttp.ClientSession, url: str, data: Any):
mozilla_root = get_mozilla_ca_crt()
ssl_context = ssl_context_for_root(mozilla_root)
response = await session.post(url, json=data, ssl=ssl_context)
return await response.json()
async def get(session: aiohttp.ClientSession, url: str):
response = await session.get(url)
return await response.text()
async def upload_backup(host: str, backup_text: str):
request = {"backup": backup_text}
session = aiohttp.ClientSession()
nonce_url = f"{host}/upload_backup"
upload_response = await post(session, nonce_url, request)
await session.close()
return upload_response
async def download_backup(host: str, private_key: PrivateKey):
session = aiohttp.ClientSession()
try:
backup_privkey = master_sk_to_backup_sk(private_key)
backup_pubkey = bytes(backup_privkey.get_g1()).hex()
# Get nonce
nonce_request = {"pubkey": backup_pubkey}
nonce_url = f"{host}/get_download_nonce"
nonce_response = await post(session, nonce_url, nonce_request)
nonce = nonce_response["nonce"]
# Sign nonce
signature = bytes(AugSchemeMPL.sign(backup_privkey, std_hash(hexstr_to_bytes(nonce)))).hex()
# Request backup url
get_backup_url = f"{host}/download_backup"
backup_request = {"pubkey": backup_pubkey, "signature": signature}
backup_response = await post(session, get_backup_url, backup_request)
if backup_response["success"] is False:
raise ValueError("No backup on backup service")
# Download from s3
backup_url = backup_response["url"]
backup_text = await get(session, backup_url)
await session.close()
return backup_text
except Exception as e:
await session.close()
# Pass exception
raise e
| 34.12 | 100 | 0.710903 |
abf52f0280264a197e9d6fbd3a74b776c341496e | 1,619 | py | Python | OpenShiftLibrary/keywords/services.py | red-hat-data-services/robotframework-openshift | 154b9176edf637fdccaa84d4318638c058a11106 | [
"MIT"
] | null | null | null | OpenShiftLibrary/keywords/services.py | red-hat-data-services/robotframework-openshift | 154b9176edf637fdccaa84d4318638c058a11106 | [
"MIT"
] | null | null | null | OpenShiftLibrary/keywords/services.py | red-hat-data-services/robotframework-openshift | 154b9176edf637fdccaa84d4318638c058a11106 | [
"MIT"
] | 2 | 2022-03-17T16:21:14.000Z | 2022-03-31T08:12:47.000Z | from typing import Optional
from robotlibcore import keyword
from OpenShiftLibrary.client import GenericClient
from OpenShiftLibrary.outputformatter import OutputFormatter
from OpenShiftLibrary.outputstreamer import OutputStreamer
from OpenShiftLibrary.errors import ResourceNotFound
class ServiceKeywords(object):
def __init__(self, client: GenericClient, output_formatter: OutputFormatter,
output_streamer: OutputStreamer) -> None:
self.client = client
self.output_formatter = output_formatter
self.output_streamer = output_streamer
@keyword
def services_should_contain(self, name: str, namespace: Optional[str] = None) -> None:
"""
Get services containing name
Args:
name (str): String that the name of the service must contain
namespace (Optional[str], optional): Namespace where the Service exists. Defaults to None.
"""
services = self.client.get(kind='Service', namespace=namespace)['items']
result = [service for service in services if name in service['metadata']['name']]
if not result:
error_message = f"Services with name containing {name} not found"
self.output_streamer.stream(error_message, 'error')
raise ResourceNotFound(error_message)
output = [{service['metadata']['name']: f"{service['spec']['clusterIPs']}:{service['spec']['ports']}"}
for service in result]
formatted_output = self.output_formatter.format(output, "Services found")
self.output_streamer.stream(formatted_output, "info")
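    # Hedged Robot Framework usage sketch (suite-level import of this library
    # is assumed; names are illustrative):
    #
    #   *** Test Cases ***
    #   Dashboard Service Exists
    #       Services Should Contain    dashboard    namespace=my-namespace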
| 43.756757 | 110 | 0.696726 |
40ac6bee475d58bf4f9d5ca1be989fbc56df7310 | 1,533 | py | Python | codegen/vhdre/__main__.py | honorpeter/fletcher | 9622293443b1ea70b1f3aa592098a64690600dd4 | [
"Apache-2.0"
] | 1 | 2021-03-05T08:24:57.000Z | 2021-03-05T08:24:57.000Z | codegen/vhdre/__main__.py | honorpeter/fletcher | 9622293443b1ea70b1f3aa592098a64690600dd4 | [
"Apache-2.0"
] | null | null | null | codegen/vhdre/__main__.py | honorpeter/fletcher | 9622293443b1ea70b1f3aa592098a64690600dd4 | [
"Apache-2.0"
] | null | null | null | from . import RegexMatcher
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
import sys
def usage():
print(r"Usage: python -m vhdre <entity-name> <regex> ... [-- <test-string> ...]")
print(r"")
print(r"Generates a file by the name <entity-name>.vhd in the working directory")
print(r"which matches against the given regular expressions. If one or more test")
print(r"strings are provided, a testbench by the name <entity-name>_tb.vhd is")
print(r"also generated. To insert a unicode code point, use {0xHHHHHH:u}. To")
print(r"insert a raw byte (for instance to check error handling) use {0xHH:b}.")
print(r"{{ and }} can be used for matching { and } literally.")
sys.exit(2)
if len(sys.argv) < 3:
usage()
# Figure out where the -- is (if it exists).
split = len(sys.argv)
for i, arg in enumerate(sys.argv[3:]):
if arg == "--":
split = i + 3
# Generate the matcher.
matcher = RegexMatcher(*sys.argv[1:split])
# Generate the main file.
vhd = str(matcher)
with open(sys.argv[1] + ".vhd", "w") as f:
f.write(vhd)
# Generate the testbench if desired.
vectors = sys.argv[split + 1:]
if vectors:
vhd_tb = matcher.testbench(vectors)
with open(sys.argv[1] + "_tb.vhd", "w") as f:
            f.write(vhd_tb)
 | 34.840909 | 90 | 0.528376 |
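# Example invocations of the CLI above (the entity name, regex, and test
# strings are invented; the flag layout follows the usage() text):
#
#     python -m vhdre my_matcher "a(b|c)+"
#     python -m vhdre my_matcher "a(b|c)+" -- ab acc abb
#
# The first call writes my_matcher.vhd; the second also writes
# my_matcher_tb.vhd exercising the three test vectors after the "--" separator.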
114df1feff381a9968ca4967bfb503ee3a7b9f16 | 9,400 | py | Python | src/fidesops/api/v1/endpoints/connection_endpoints.py | autosuggested/fidesops | a399abbc39e8fc528bc31d1bd3f0419c3379e6f3 | ["Apache-2.0"] | stars: null | issues: null | forks: null
import logging
from typing import List, Optional
from fastapi import APIRouter, HTTPException, Depends
from fastapi.params import Security
from fastapi_pagination.ext.sqlalchemy import paginate
from fastapi_pagination import Page, Params
from fastapi_pagination.bases import AbstractPage
from fidesops.schemas.shared_schemas import FidesOpsKey
from pydantic import ValidationError, conlist
from sqlalchemy.orm import Session
from starlette.status import HTTP_404_NOT_FOUND
from fidesops.common_exceptions import ConnectionException, KeyOrNameAlreadyExists
from fidesops.schemas.connection_configuration import (
get_connection_secrets_validator,
connection_secrets_schemas,
)
from fidesops.schemas.connection_configuration.connection_secrets import (
TestStatusMessage,
ConnectionConfigSecretsSchema,
ConnectionTestStatus,
)
from fidesops.service.connectors import get_connector
from fidesops.schemas.api import BulkUpdateFailed
from fidesops.schemas.connection_configuration.connection_config import (
ConnectionConfigurationResponse,
CreateConnectionConfiguration,
BulkPutConnectionConfiguration,
)
from fidesops.api.v1.scope_registry import (
CONNECTION_READ,
CONNECTION_DELETE,
CONNECTION_CREATE_OR_UPDATE,
)
from fidesops.util.logger import NotPii
from fidesops.util.oauth_util import verify_oauth_client
from fidesops.api import deps
from fidesops.models.connectionconfig import ConnectionConfig
from fidesops.api.v1.urn_registry import (
CONNECTION_BY_KEY,
CONNECTIONS,
V1_URL_PREFIX,
CONNECTION_SECRETS,
CONNECTION_TEST,
)
router = APIRouter(tags=["Connections"], prefix=V1_URL_PREFIX)
logger = logging.getLogger(__name__)
def get_connection_config_or_error(
db: Session, connection_key: FidesOpsKey
) -> ConnectionConfig:
"""Helper to load the ConnectionConfig object or throw a 404"""
connection_config = ConnectionConfig.get_by(db, field="key", value=connection_key)
logger.info(f"Finding connection configuration with key '{connection_key}'")
if not connection_config:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No connection configuration found with key '{connection_key}'.",
)
return connection_config
@router.get(
CONNECTIONS,
dependencies=[Security(verify_oauth_client, scopes=[CONNECTION_READ])],
response_model=Page[ConnectionConfigurationResponse],
)
def get_connections(
*, db: Session = Depends(deps.get_db), params: Params = Depends()
) -> AbstractPage[ConnectionConfig]:
"""Returns all connection configurations in the database."""
logger.info(
f"Finding all connection configurations with pagination params {params}"
)
return paginate(ConnectionConfig.query(db), params=params)
@router.get(
CONNECTION_BY_KEY,
dependencies=[Security(verify_oauth_client, scopes=[CONNECTION_READ])],
response_model=ConnectionConfigurationResponse,
)
def get_connection_detail(
connection_key: FidesOpsKey, db: Session = Depends(deps.get_db)
) -> ConnectionConfig:
"""Returns connection configuration with matching key."""
return get_connection_config_or_error(db, connection_key)
@router.patch(
CONNECTIONS,
dependencies=[Security(verify_oauth_client, scopes=[CONNECTION_CREATE_OR_UPDATE])],
status_code=200,
response_model=BulkPutConnectionConfiguration,
)
def patch_connections(
*,
db: Session = Depends(deps.get_db),
configs: conlist(CreateConnectionConfiguration, max_items=50), # type: ignore
) -> BulkPutConnectionConfiguration:
"""
Given a list of connection config data elements, create or update corresponding ConnectionConfig objects
or report failure
If the key in the payload exists, it will be used to update an existing ConnectionConfiguration.
Otherwise, a new ConnectionConfiguration will be created for you.
Note that ConnectionConfiguration.secrets are not updated through this endpoint.
"""
created_or_updated: List[ConnectionConfig] = []
failed: List[BulkUpdateFailed] = []
logger.info(f"Starting bulk upsert for {len(configs)} connection configuration(s)")
for config in configs:
orig_data = config.dict().copy()
try:
connection_config = ConnectionConfig.create_or_update(
db, data=config.dict()
)
created_or_updated.append(connection_config)
except KeyOrNameAlreadyExists as exc:
logger.warning(
f"Create/update failed for connection config with key '{config.key}': {exc}"
)
failed.append(
BulkUpdateFailed(
message=exc.args[0],
data=orig_data,
)
)
except Exception:
logger.warning(
f"Create/update failed for connection config with key '{config.key}'."
)
failed.append(
BulkUpdateFailed(
message=f"This connection configuration could not be added.",
data=orig_data,
)
)
return BulkPutConnectionConfiguration(
succeeded=created_or_updated,
failed=failed,
)
@router.delete(
CONNECTION_BY_KEY,
dependencies=[Security(verify_oauth_client, scopes=[CONNECTION_DELETE])],
status_code=204,
)
def delete_connection(
connection_key: FidesOpsKey, *, db: Session = Depends(deps.get_db)
) -> None:
"""Removes the connection configuration with matching key."""
connection_config = get_connection_config_or_error(db, connection_key)
logger.info(f"Deleting connection config with key '{connection_key}'.")
connection_config.delete(db)
def validate_secrets(
request_body: connection_secrets_schemas, connection_config: ConnectionConfig
) -> ConnectionConfigSecretsSchema:
"""Validate incoming connection configuration secrets."""
logger.info(
f"Validating secrets on connection config with key '{connection_config.key}'"
)
connection_type = connection_config.connection_type
saas_config = connection_config.get_saas_config()
schema = get_connection_secrets_validator(connection_type.value, saas_config)
try:
connection_secrets = schema.parse_obj(request_body)
except ValidationError as e:
raise HTTPException(
status_code=422,
detail=e.errors(),
)
return connection_secrets
def connection_status(
connection_config: ConnectionConfig, msg: str, db: Session = Depends(deps.get_db)
) -> TestStatusMessage:
"""Connect, verify with a trivial query, and report the status."""
connector = get_connector(connection_config)
try:
status: ConnectionTestStatus = connector.test_connection()
except ConnectionException as exc:
logger.warning(
"Connection test failed on %s: %s",
NotPii(connection_config.key),
str(exc),
)
connection_config.update_test_status(
test_status=ConnectionTestStatus.failed, db=db
)
return TestStatusMessage(
msg=msg,
test_status=ConnectionTestStatus.failed,
failure_reason=str(exc),
)
logger.info(f"Connection test {status.value} on {connection_config.key}")
connection_config.update_test_status(test_status=status, db=db)
return TestStatusMessage(
msg=msg,
test_status=status,
)
@router.put(
CONNECTION_SECRETS,
status_code=200,
dependencies=[Security(verify_oauth_client, scopes=[CONNECTION_CREATE_OR_UPDATE])],
response_model=TestStatusMessage,
)
async def put_connection_config_secrets(
connection_key: FidesOpsKey,
*,
db: Session = Depends(deps.get_db),
unvalidated_secrets: connection_secrets_schemas,
verify: Optional[bool] = True,
) -> TestStatusMessage:
"""
Update secrets that will be used to connect to a specified connection_type.
The specific secrets will be connection-dependent. For example, the components needed to connect to a Postgres DB
will differ from Dynamo DB.
"""
connection_config = get_connection_config_or_error(db, connection_key)
connection_config.secrets = validate_secrets(
unvalidated_secrets, connection_config
).dict()
# Save validated secrets, regardless of whether they've been verified.
logger.info(f"Updating connection config secrets for '{connection_key}'")
connection_config.save(db=db)
msg = f"Secrets updated for ConnectionConfig with key: {connection_key}."
if verify:
return connection_status(connection_config, msg, db)
return TestStatusMessage(msg=msg, test_status=None)
@router.get(
CONNECTION_TEST,
status_code=200,
dependencies=[Security(verify_oauth_client, scopes=[CONNECTION_READ])],
response_model=TestStatusMessage,
)
async def test_connection_config_secrets(
connection_key: FidesOpsKey,
*,
db: Session = Depends(deps.get_db),
) -> TestStatusMessage:
"""
Endpoint to test a connection at any time using the saved configuration secrets.
"""
connection_config = get_connection_config_or_error(db, connection_key)
msg = f"Test completed for ConnectionConfig with key: {connection_key}."
return connection_status(connection_config, msg, db)
| 34.432234 | 117 | 0.724043 |
dcd61d4355c8f47517a2fffad5f97fe4e1ffe037 | 15,006 | py | Python | idfy_sdk/services/signature/signature_service.py | idfy-io/idfy-sdk-python | 0f7ced0cf0df080b1c73e2451bf02a23710b5bf1 | ["Apache-2.0"] | stars: null | issues: null | forks: null
import asyncio
import functools
import sys
from typing import List, Dict
from idfy_sdk.idfy_configuration import IdfyConfiguration as config
from idfy_sdk import urls as urls
from idfy_sdk.services.IdfyBaseService import IdfyBaseService
from idfy_sdk.services.signature import models as models
class SignatureService(IdfyBaseService):
"""Sign contracts, declarations, forms and other documents using digital signatures."""
def __init__(self, client_id=None, client_secret=None, scopes=None):
super().__init__(client_id=client_id, client_secret=client_secret, scopes=scopes)
#Documents
def get_document(self, documentId, threaded=False):
        url = config.BaseUrl + urls.SignatureDocuments + '/' + documentId
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=models.CreateDocumentResponse))
return self.Get(url, model=models.CreateDocumentResponse)
def create_document(self, options: 'DocumentCreateOptions', threaded=False):
url = config.BaseUrl + urls.SignatureDocuments
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Post, url, model=models.CreateDocumentResponse, data=options))
return self.Post(url, model=models.CreateDocumentResponse, data=options)
def update_document(self, document_id, options: 'DocumentUpdateOptions' = None, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Patch, url, model=models.UpdateDocumentRequest, data=options))
return self.Patch(url, model=models.UpdateDocumentRequest, data=options)
def cancel_document(self, document_id, reason, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/cancel'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Post, url, params=reason))
self.Post(url, params=reason)
def get_document_status(self, document_id, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/status'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=models.Status))
return self.Get(url, model=models.Status)
def get_document_summary(self, document_id, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/summary'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=models.DocumentSummary))
return self.Get(url, model=models.DocumentSummary)
def list_document_summaries(
self,
external_id = None,
signer_id = None,
external_signer_id = None,
from_date = None,
to_date = None,
last_updated = None,
signed_date = None,
name_of_signer = None,
title = None,
status = None,
tags = None,
offset = None,
limit = None
, threaded=False):
params = {
"externalId": external_id,
"signerId": signer_id,
"externalSignerId": external_signer_id,
"fromDate": from_date,
"toDate": to_date,
"lastUpdated": last_updated,
"signedDate": signed_date,
"nameOfSigner": name_of_signer,
"title": title,
"status": status,
"tags": tags,
"offset": offset,
"limit": limit
}
url = config.BaseUrl + urls.SignatureDocuments + '/summary'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=models.CollectionWithPagingDocumentSummary, params=params))
return self.Get(url, model=models.CollectionWithPagingDocumentSummary, params=params)
#Signers
def get_signer(self, document_id, signer_id, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/signers/' + signer_id
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=models.SignerResponse))
return self.Get(url, model=models.SignerResponse)
def create_signer(self, document_id, signer_options: 'SignerWrapper', threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/signers'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Post, url, model=models.SignerResponse, data=signer_options))
return self.Post(url, model=models.SignerResponse, data=signer_options)
def update_signer(self, document_id, signer_id, signer_options: 'SignerWrapper', threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/signers/' + signer_id
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Patch, url, model=models.SignerResponse, data=signer_options))
return self.Patch(url, model=models.SignerResponse, data=signer_options)
def delete_signer(self, document_id, signer_id, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/signers/' + signer_id
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Delete, url))
self.Delete(url)
def list_signers(self, document_id, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/signers/'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=List[models.SignerResponse]))
return self.Get(url, model=List[models.SignerResponse])
#Signature/status/ping?
def get_attachment(self, document_id, attachment_id, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/attachments/' + attachment_id
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=models.AttachmentResponse))
return self.Get(url, model=models.AttachmentResponse)
def create_attachment(self, document_id, data: 'AttachmentRequestWrapper', threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/attachments/'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Post, url, model=models.AttachmentResponse, data=data))
return self.Post(url, model=models.AttachmentResponse, data=data)
def update_attachment(self, document_id, attachment_id, data: 'AttachmentOptions', threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/attachments/' + attachment_id
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Patch, url, model=models.AttachmentResponse, data=data))
return self.Patch(url, model=models.AttachmentResponse, data=data)
def delete_attachment(self, document_id, attachment_id, threaded=False): # Not "tested" yet.
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/attachments/' + attachment_id
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Delete, url))
return self.Delete(url)
def list_attachments(self, document_id, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/attachments'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=List[models.AttachmentListItem]))
return self.Get(url, model=List[models.AttachmentListItem])
def get_file(self, document_id, file_format, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/files'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model='file', params=file_format))
return self.Get(url, model='file', params=file_format)
def get_file_for_signer(self, document_id, signer_id, file_format, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/files/signers/' + signer_id
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model='file', params=file_format))
return self.Get(url, model='file', params=file_format)
def get_attachment_file(self, document_id, attachment_id, file_format, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/files/attachments/' + attachment_id
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model='file', params=file_format))
return self.Get(url, model='file', params=file_format)
def get_attachment_file_for_signer(self, document_id, attachment_id, signer_id, file_format, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/files/attachments/' + attachment_id + '/signers/' + signer_id
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model='file', params=file_format))
return self.Get(url, model='file', params=file_format)
def list_notifications(self, document_id, threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/notifications'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=List[models.NotificationLogItem]))
return self.Get(url, model=List[models.NotificationLogItem])
def send_reminders(self, document_id, manual_reminder: 'ManualReminder', threaded=False):
url = config.BaseUrl + urls.SignatureDocuments + '/' + document_id + '/notifications/reminder'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Post, url, model=models.ManualReminder, data=manual_reminder))
return self.Post(url, model=models.ManualReminder, data=manual_reminder)
def list_themes(self, threaded=False):
url = config.BaseUrl + urls.Signature + '/themes/list/themes'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=List[str]))
return self.Get(url, model=List[str])
def list_spinners(self, threaded=False):
url = config.BaseUrl + urls.Signature + '/themes/list/spinners'
if threaded:
if sys.version_info >= (3, 7):
loop = asyncio.get_running_loop()
else:
loop = asyncio.get_event_loop()
return loop.run_in_executor(None, functools.partial(self.Get, url, model=List[str]))
        return self.Get(url, model=List[str])
 | 40.447439 | 144 | 0.618219 |
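# A short sketch of the two calling conventions every method above supports;
# the credentials and the document id are placeholders.
service = SignatureService(client_id="<client-id>", client_secret="<client-secret>")
# threaded=False (default): blocks and returns the parsed response model.
status = service.get_document_status("11111111-2222-3333-4444-555555555555")
# threaded=True: returns a Future scheduled on the default executor, so it can
# be awaited from inside a running event loop.
async def poll_status():
    return await service.get_document_status(
        "11111111-2222-3333-4444-555555555555", threaded=True)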
eb82c83e7da6339f6860a8a16d4e36069af6aff3 | 7,309 | py | Python | tools/telemetry/telemetry/web_perf/timeline_interaction_record_unittest.py | kjthegod/chromium | cf940f7f418436b77e15b1ea23e6fa100ca1c91a | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | stars: 1 (2020-01-25T09:58:49.000Z) | issues: null | forks: 2 (2015-03-27T11:15:39.000Z to 2016-08-17T14:19:56.000Z)
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.timeline import model as model_module
from telemetry.timeline import slice as slice_module
from telemetry.timeline import async_slice
from telemetry.web_perf import timeline_interaction_record as tir_module
class ParseTests(unittest.TestCase):
def testParse(self):
self.assertTrue(tir_module.IsTimelineInteractionRecord(
'Interaction.Foo'))
self.assertTrue(tir_module.IsTimelineInteractionRecord(
'Interaction.Foo/Bar'))
self.assertFalse(tir_module.IsTimelineInteractionRecord(
'SomethingRandom'))
class TimelineInteractionRecordTests(unittest.TestCase):
def CreateSimpleRecordWithName(self, event_name):
s = async_slice.AsyncSlice(
'cat', event_name,
timestamp=0, duration=200, thread_start=20, thread_duration=100)
return tir_module.TimelineInteractionRecord.FromAsyncEvent(s)
def CreateTestSliceFromTimeRanges(
self, parent_thread, time_start, time_end, thread_start, thread_end):
duration = time_end - time_start
thread_duration = thread_end - thread_start
return slice_module.Slice(parent_thread, 'Test', 'foo', time_start,
duration, thread_start, thread_duration)
def testCreate(self):
r = self.CreateSimpleRecordWithName('Interaction.LogicalName')
self.assertEquals('LogicalName', r.label)
self.assertEquals(False, r.is_smooth)
self.assertEquals(False, r.is_responsive)
r = self.CreateSimpleRecordWithName('Interaction.LogicalName/is_smooth')
self.assertEquals('LogicalName', r.label)
self.assertEquals(True, r.is_smooth)
self.assertEquals(False, r.is_responsive)
r = self.CreateSimpleRecordWithName(
'Interaction.LogicalNameWith/Slash/is_smooth')
self.assertEquals('LogicalNameWith/Slash', r.label)
self.assertEquals(True, r.is_smooth)
self.assertEquals(False, r.is_responsive)
r = self.CreateSimpleRecordWithName(
'Interaction.LogicalNameWith/Slash/is_smooth,is_responsive')
self.assertEquals('LogicalNameWith/Slash', r.label)
self.assertEquals(True, r.is_smooth)
self.assertEquals(True, r.is_responsive)
def testGetJavaScriptMarker(self):
smooth_marker = tir_module.GetJavaScriptMarker(
'MyLabel', [tir_module.IS_SMOOTH])
self.assertEquals('Interaction.MyLabel/is_smooth', smooth_marker)
slr_marker = tir_module.GetJavaScriptMarker(
'MyLabel', [tir_module.IS_SMOOTH, tir_module.IS_RESPONSIVE])
self.assertEquals('Interaction.MyLabel/is_smooth,is_responsive', slr_marker)
def testGetOverlappedThreadTimeForSliceInSameThread(self):
# Create a renderer thread.
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
model.FinalizeImport()
# Make a record that starts at 30ms and ends at 60ms in thread time.
s = async_slice.AsyncSlice(
'cat', 'Interaction.Test/is_smooth',
timestamp=0, duration=200, start_thread=renderer_main,
end_thread=renderer_main, thread_start=30, thread_duration=30)
record = tir_module.TimelineInteractionRecord.FromAsyncEvent(s)
# Non overlapped range on the left of event.
s1 = self.CreateTestSliceFromTimeRanges(renderer_main, 0, 100, 10, 20)
self.assertEquals(0, record.GetOverlappedThreadTimeForSlice(s1))
# Non overlapped range on the right of event.
s2 = self.CreateTestSliceFromTimeRanges(renderer_main, 0, 100, 70, 90)
self.assertEquals(0, record.GetOverlappedThreadTimeForSlice(s2))
# Overlapped range on the left of event.
s3 = self.CreateTestSliceFromTimeRanges(renderer_main, 0, 100, 20, 50)
self.assertEquals(20, record.GetOverlappedThreadTimeForSlice(s3))
# Overlapped range in the middle of event.
s4 = self.CreateTestSliceFromTimeRanges(renderer_main, 0, 100, 40, 50)
self.assertEquals(10, record.GetOverlappedThreadTimeForSlice(s4))
# Overlapped range on the left of event.
s5 = self.CreateTestSliceFromTimeRanges(renderer_main, 0, 100, 50, 90)
self.assertEquals(10, record.GetOverlappedThreadTimeForSlice(s5))
def testRepr(self):
# Create a renderer thread.
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
model.FinalizeImport()
s = async_slice.AsyncSlice(
'cat', 'Interaction.Test/is_smooth',
timestamp=0, duration=200, start_thread=renderer_main,
end_thread=renderer_main, thread_start=30, thread_duration=30)
record = tir_module.TimelineInteractionRecord.FromAsyncEvent(s)
expected_repr = (
'TimelineInteractionRecord(label=\'Test\', '
'start=0.000000, end=200.000000, flags=is_smooth, '
'async_event=TimelineEvent(name=\'Interaction.Test/is_smooth\','
' start=0.000000, duration=200, thread_start=30, thread_duration=30))')
self.assertEquals(expected_repr, repr(record))
def testGetOverlappedThreadTimeForSliceInDifferentThread(self):
# Create a renderer thread and another thread.
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
another_thread = model.GetOrCreateProcess(1).GetOrCreateThread(3)
model.FinalizeImport()
# Make a record that starts at 50ms and ends at 150ms in wall time, and is
# scheduled 75% of the time (hence thread_duration = 100ms*75% = 75ms).
s = async_slice.AsyncSlice(
'cat', 'Interaction.Test/is_smooth',
timestamp=50, duration=100, start_thread=renderer_main,
end_thread=renderer_main, thread_start=55, thread_duration=75)
record = tir_module.TimelineInteractionRecord.FromAsyncEvent(s)
# Non overlapped range on the left of event.
s1 = self.CreateTestSliceFromTimeRanges(another_thread, 25, 40, 28, 30)
self.assertEquals(0, record.GetOverlappedThreadTimeForSlice(s1))
# Non overlapped range on the right of event.
s2 = self.CreateTestSliceFromTimeRanges(another_thread, 200, 300, 270, 290)
self.assertEquals(0, record.GetOverlappedThreadTimeForSlice(s2))
# Overlapped range on the left of event, and slice is scheduled 50% of the
# time.
# The overlapped wall-time duration is 50ms.
# The overlapped thread-time duration is 50ms * 75% * 50% = 18.75
s3 = self.CreateTestSliceFromTimeRanges(another_thread, 0, 100, 20, 70)
self.assertEquals(18.75, record.GetOverlappedThreadTimeForSlice(s3))
# Overlapped range in the middle of event, and slice is scheduled 20% of the
# time.
# The overlapped wall-time duration is 40ms.
# The overlapped thread-time duration is 40ms * 75% * 20% = 6
s4 = self.CreateTestSliceFromTimeRanges(another_thread, 100, 140, 120, 128)
self.assertEquals(6, record.GetOverlappedThreadTimeForSlice(s4))
# Overlapped range on the left of event, and slice is scheduled 100% of the
# time.
# The overlapped wall-time duration is 32ms.
# The overlapped thread-time duration is 32ms * 75% * 100% = 24
s5 = self.CreateTestSliceFromTimeRanges(another_thread, 118, 170, 118, 170)
self.assertEquals(24, record.GetOverlappedThreadTimeForSlice(s5))
| 44.29697 | 80 | 0.744835 |
4df6aa046b94798a146b2475a7a6c875cacf2f07 | 1,278 | py | Python | 8kyu/test_who_is_going_to_pay_for_the_wall.py | adun/codewars.py | 89e7d81e9ca05a432007d634892c1cba28f5b715 | ["MIT"] | stars: null | issues: null | forks: null
# Don Drumphet lives in a nice neighborhood, but one of his neighbors has started to
# let his house go. Don Drumphet wants to build a wall between his house and his
# neighbor’s, and is trying to get the neighborhood association to pay for it. He
# begins to solicit his neighbors to petition to get the association to build the wall.
# Unfortunately for Don Drumphet, he cannot read very well, has a very limited attention
# span, and can only remember two letters from each of his neighbors’ names. As he
# collects signatures, he insists that his neighbors keep truncating their names until
# two letters remain, and he can finally read them.
# Your code will show Full name of the neighbor and the truncated version of the name
# as an array. If the number of the characters in name is less than or equal to two,
# it will return an array containing only the name as is'
def who_is_paying(name):
return [name] if len(name) <= 2 else [name, name[:2]]
def test_who_is_paying():
assert who_is_paying('Mexico') == ['Mexico', 'Me']
assert who_is_paying('Melania') == ['Melania', 'Me']
assert who_is_paying('Melissa') == ['Melissa', 'Me']
assert who_is_paying('Me') == ['Me']
assert who_is_paying('') == ['']
assert who_is_paying('I') == ['I']
| 49.153846 | 88 | 0.72144 |
c6d12fabf46b2e398a5a65f0165bb2e2867a9a7e | 201 | py | Python | plutoserverextension.py | dralletje/pluto-on-binder | 13bae7e95d44514d4b4bbb38319adb5bbd8b082f | ["Unlicense"] | stars: null | issues: null | forks: null
from subprocess import Popen
def load_jupyter_server_extension(nbapp):
Popen(["julia", "-Jprecompile.so", "-e", "import Pluto; Pluto.run(\"0.0.0.0\", 1234; security=Pluto.ServerSecurity(false))"])
| 50.25 | 129 | 0.721393 |
7e14bde21328e7e4df4cc41fe828eb04026a47ec | 792 | py | Python | synapse/tests/test_lib_auth.py | larrycameron80/synapse | 24bf21c40b4a467e5dc28c8204aecaf502d5cddf | ["Apache-2.0"] | stars: null | issues: null | forks: null
from synapse.tests.common import *
import synapse.lib.auth as s_auth
class AuthTest(SynTest):
def test_auth_rules(self):
rules = (
(True, ('foo:bar', {'baz': 'faz'})),
(False, ('foo:bar', {'baz': 'woot*'})),
(True, ('hehe:*', {})),
)
rulz = s_auth.Rules(rules)
self.true(rulz.allow(('hehe:ha', {})))
self.true(rulz.allow(('foo:bar', {'baz': 'faz'})))
self.false(rulz.allow(('foo:bar', {'baz': 'wootwoot'})))
self.false(rulz.allow(('newp:newp', {})))
def test_auth_runas(self):
self.eq(s_auth.whoami(), 'root@localhost')
with s_auth.runas('visi@vertex.link'):
self.eq(s_auth.whoami(), 'visi@vertex.link')
self.eq(s_auth.whoami(), 'root@localhost')
| 27.310345 | 64 | 0.534091 |
5724a675ee913402b94584849f746e8562914041 | 12,697 | py | Python | ironic_python_agent/config.py | yrobla/ironic-python-agent | c8af93e0df6557e47f4045b53d11b2daaefb8ec3 | ["Apache-2.0"] | stars: null | issues: null | forks: null
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from ironic_python_agent import netutils
from ironic_python_agent import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
APARAMS = utils.get_agent_params()
INSPECTION_DEFAULT_COLLECTOR = 'default'
INSPECTION_DEFAULT_DHCP_WAIT_TIMEOUT = 60
cli_opts = [
cfg.StrOpt('api_url',
default=APARAMS.get('ipa-api-url'),
regex='^(mdns|http(s?):\\/\\/.+)',
help='URL of the Ironic API. '
'Can be supplied as "ipa-api-url" kernel parameter.'
'The value must start with either http:// or https://. '
'A special value "mdns" can be specified to fetch the '
'URL using multicast DNS service discovery.'),
cfg.StrOpt('listen_host',
default=APARAMS.get('ipa-listen-host',
netutils.get_wildcard_address()),
sample_default='::',
help='The IP address to listen on. '
'Can be supplied as "ipa-listen-host" kernel parameter.'),
cfg.PortOpt('listen_port',
default=int(APARAMS.get('ipa-listen-port', 9999)),
help='The port to listen on. '
'Can be supplied as "ipa-listen-port" kernel parameter.'),
cfg.StrOpt('advertise_host',
default=APARAMS.get('ipa-advertise-host', None),
help='The host to tell Ironic to reply and send '
'commands to. '
'Can be supplied as "ipa-advertise-host" '
'kernel parameter.'),
cfg.PortOpt('advertise_port',
default=int(APARAMS.get('ipa-advertise-port', 9999)),
help='The port to tell Ironic to reply and send '
'commands to. '
'Can be supplied as "ipa-advertise-port" '
'kernel parameter.'),
cfg.IntOpt('ip_lookup_attempts',
min=1,
default=int(APARAMS.get('ipa-ip-lookup-attempts', 6)),
help='The number of times to try and automatically '
'determine the agent IPv4 address. '
'Can be supplied as "ipa-ip-lookup-attempts" '
'kernel parameter.'),
cfg.IntOpt('ip_lookup_sleep',
min=0,
default=int(APARAMS.get('ipa-ip-lookup-timeout', 10)),
help='The amount of time to sleep between attempts '
'to determine IP address. '
'Can be supplied as "ipa-ip-lookup-timeout" '
'kernel parameter.'),
cfg.StrOpt('network_interface',
default=APARAMS.get('ipa-network-interface', None),
help='The interface to use when looking for an IP address. '
'Can be supplied as "ipa-network-interface" '
'kernel parameter.'),
cfg.IntOpt('lookup_timeout',
min=0,
default=int(APARAMS.get('ipa-lookup-timeout', 300)),
help='The amount of time to retry the initial lookup '
'call to Ironic. After the timeout, the agent '
'will exit with a non-zero exit code. '
'Can be supplied as "ipa-lookup-timeout" '
'kernel parameter.'),
cfg.IntOpt('lookup_interval',
min=0,
default=int(APARAMS.get('ipa-lookup-interval', 1)),
help='The initial interval for retries on the initial '
'lookup call to Ironic. The interval will be '
'doubled after each failure until timeout is '
'exceeded. '
'Can be supplied as "ipa-lookup-interval" '
'kernel parameter.'),
cfg.FloatOpt('lldp_timeout',
default=APARAMS.get('ipa-lldp-timeout',
APARAMS.get('lldp-timeout', 30.0)),
help='The amount of seconds to wait for LLDP packets. '
'Can be supplied as "ipa-lldp-timeout" '
'kernel parameter.'),
cfg.BoolOpt('collect_lldp',
default=APARAMS.get('ipa-collect-lldp', False),
help='Whether IPA should attempt to receive LLDP packets for '
'each network interface it discovers in the inventory. '
'Can be supplied as "ipa-collect-lldp" '
'kernel parameter.'),
cfg.BoolOpt('standalone',
default=APARAMS.get('ipa-standalone', False),
help='Note: for debugging only. Start the Agent but suppress '
'any calls to Ironic API. '
'Can be supplied as "ipa-standalone" '
'kernel parameter.'),
cfg.StrOpt('inspection_callback_url',
default=APARAMS.get('ipa-inspection-callback-url'),
help='Endpoint of ironic-inspector. If set, hardware inventory '
'will be collected and sent to ironic-inspector '
'on start up. '
'A special value "mdns" can be specified to fetch the '
'URL using multicast DNS service discovery. '
'Can be supplied as "ipa-inspection-callback-url" '
'kernel parameter.'),
cfg.StrOpt('inspection_collectors',
default=APARAMS.get('ipa-inspection-collectors',
INSPECTION_DEFAULT_COLLECTOR),
help='Comma-separated list of plugins providing additional '
'hardware data for inspection, empty value gives '
'a minimum required set of plugins. '
'Can be supplied as "ipa-inspection-collectors" '
'kernel parameter.'),
cfg.IntOpt('inspection_dhcp_wait_timeout',
min=0,
default=APARAMS.get('ipa-inspection-dhcp-wait-timeout',
INSPECTION_DEFAULT_DHCP_WAIT_TIMEOUT),
help='Maximum time (in seconds) to wait for the PXE NIC '
'(or all NICs if inspection_dhcp_all_interfaces is True) '
'to get its IP address via DHCP before inspection. '
'Set to 0 to disable waiting completely. '
'Can be supplied as "ipa-inspection-dhcp-wait-timeout" '
'kernel parameter.'),
cfg.BoolOpt('inspection_dhcp_all_interfaces',
default=APARAMS.get('ipa-inspection-dhcp-all-interfaces',
False),
help='Whether to wait for all interfaces to get their IP '
'addresses before inspection. If set to false '
'(the default), only waits for the PXE interface. '
'Can be supplied as '
'"ipa-inspection-dhcp-all-interfaces" '
'kernel parameter.'),
cfg.IntOpt('hardware_initialization_delay',
min=0,
default=APARAMS.get('ipa-hardware-initialization-delay', 0),
help='How much time (in seconds) to wait for hardware to '
'initialize before proceeding with any actions. '
'Can be supplied as "ipa-hardware-initialization-delay" '
'kernel parameter.'),
cfg.IntOpt('disk_wait_attempts',
min=0,
default=APARAMS.get('ipa-disk-wait-attempts', 10),
help='The number of times to try and check to see if '
'at least one suitable disk has appeared in inventory '
'before proceeding with any actions. '
'Can be supplied as "ipa-disk-wait-attempts" '
'kernel parameter.'),
cfg.IntOpt('disk_wait_delay',
min=0,
default=APARAMS.get('ipa-disk-wait-delay', 3),
help='How much time (in seconds) to wait between attempts '
'to check if at least one suitable disk has appeared '
'in inventory. Set to zero to disable. '
'Can be supplied as "ipa-disk-wait-delay" '
'kernel parameter.'),
cfg.BoolOpt('insecure',
default=APARAMS.get('ipa-insecure', False),
help='Verify HTTPS connections. Can be supplied as '
'"ipa-insecure" kernel parameter.'),
cfg.StrOpt('cafile',
help='Path to PEM encoded Certificate Authority file '
'to use when verifying HTTPS connections. '
'Default is to use available system-wide configured CAs.'),
cfg.StrOpt('certfile',
help='Path to PEM encoded client certificate cert file. '
'Must be provided together with "keyfile" option. '
'Default is to not present any client certificates to '
'the server.'),
cfg.StrOpt('keyfile',
help='Path to PEM encoded client certificate key file. '
'Must be provided together with "certfile" option. '
'Default is to not present any client certificates to '
'the server.'),
cfg.BoolOpt('introspection_daemon',
default=False,
help='When the ``ironic-collect-introspection-data`` '
'command is executed, continue running as '
'a background process and continue to post data '
'to the bare metal inspection service.'),
cfg.IntOpt('introspection_daemon_post_interval',
default=300,
help='The interval in seconds by which to transmit data to '
'the bare metal introspection service when the '
'``ironic-collect-introspection-data`` program is '
'executing in daemon mode.'),
cfg.StrOpt('ntp_server',
default=APARAMS.get('ipa-ntp-server', None),
help='Address of a single NTP server against which the '
'agent should sync the hardware clock prior to '
'rebooting to an instance.'),
cfg.BoolOpt('fail_if_clock_not_set',
default=False,
help='If operations should fail if the clock time sync '
'fails to complete successfully.'),
cfg.StrOpt('agent_token',
default=APARAMS.get('ipa-agent-token'),
help='Pre-shared token to use when working with the '
'ironic API. This value is typically supplied by '
'ironic automatically.'),
cfg.BoolOpt('agent_token_required',
default=APARAMS.get('ipa-agent-token-required', False),
help='Control to enforce if API command requests should '
'enforce token validation. The configuration provided '
'by the conductor MAY override this and force this '
'setting to be changed to True in memory.'),
]
CONF.register_cli_opts(cli_opts)
def list_opts():
return [('DEFAULT', cli_opts)]
def override(params):
"""Override configuration with values from a dictionary.
This is used for configuration overrides from mDNS.
:param params: new configuration parameters as a dict.
"""
if not params:
return
LOG.debug('Overriding configuration with %s', params)
for key, value in params.items():
if key.startswith('ipa_'):
key = key[4:]
else:
LOG.warning('Skipping unknown configuration option %s', key)
continue
try:
CONF.set_override(key, value)
except Exception as exc:
LOG.warning('Unable to override configuration option %(key)s '
'with %(value)r: %(exc)s',
{'key': key, 'value': value, 'exc': exc})
| 46.170909 | 79 | 0.554383 |
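# A small sketch of the override() hook defined above; the keys shown are real
# options from this file, but the values are invented. Keys must carry the
# "ipa_" prefix to be applied; anything else is logged and skipped.
from ironic_python_agent import config
config.override({
    "ipa_lookup_timeout": 60,   # applied as CONF.lookup_timeout
    "ipa_listen_port": 9000,    # applied as CONF.listen_port
    "mystery_option": True,     # no "ipa_" prefix: warned about and ignored
})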
f6210292dcf77c56b357b2b5332eaae75de687d0 | 2,034 | py | Python | videoanalyst/data/adaptor_dataset.py | lizhenbang56/Manipulating-Template-Pixels-for-Model-Adaptation-of-Siamese-Visual-Tracking | 76b88d8e68ac3d575a2ce81fc07ee2fce5f050d6 | ["MIT"] | stars: 1 (2020-04-08T02:57:59.000Z) | issues: null | forks: null
# -*- coding: utf-8 -*
from itertools import chain
from typing import Dict
from loguru import logger
import torch
import torch.multiprocessing
from torch.utils.data import DataLoader, Dataset
from videoanalyst.utils.misc import Timer
from .datapipeline import builder as datapipeline_builder
# pytorch wrapper for multiprocessing
# https://pytorch.org/docs/stable/multiprocessing.html#strategy-management
_SHARING_STRATETY = "file_system"
if _SHARING_STRATETY in torch.multiprocessing.get_all_sharing_strategies():
torch.multiprocessing.set_sharing_strategy(_SHARING_STRATETY)
class AdaptorDataset(Dataset):
_EXT_SEED_STEP = 30011 # better to be a prime number
_SEED_STEP = 10007 # better to be a prime number
_SEED_DIVIDER = 1000003 # better to be a prime number
def __init__(
self,
task,
cfg,
num_epochs=1,
nr_image_per_epoch=1,
seed: int = 0,
):
self.datapipeline = None
self.task = task
self.cfg = cfg
self.num_epochs = num_epochs
self.nr_image_per_epoch = nr_image_per_epoch
self.ext_seed = seed
def __getitem__(self, item):
if self.datapipeline is None:
# build datapipeline with random seed the first time when __getitem__ is called
# usually, dataset is already spawned (into subprocess) at this point.
seed = (torch.initial_seed() + item * self._SEED_STEP +
self.ext_seed * self._EXT_SEED_STEP) % self._SEED_DIVIDER
self.datapipeline = datapipeline_builder.build(self.task,
self.cfg,
seed=seed)
logger.info("AdaptorDataset #%d built datapipeline with seed=%d" %
(item, seed))
training_data = self.datapipeline[item]
return training_data
def __len__(self):
return self.nr_image_per_epoch * self.num_epochs
| 34.474576 | 91 | 0.638643 |
e91b0098c98a626bd0a982afa3899ead03452cab | 214 | py | Python | defense/Scripts/adventuremap.py | kyongkyong289/pythongame | a695193d2a7b8ee9a6b1235c2794543d865a878f | ["MIT"] | stars: null | issues: null | forks: null
import pygame
import visual as vs
import variables as var
import uidata as ui
import inputfunctions as iff
def display():
    var.screen.blit(vs.font_title.render('Map', True, vs.Colors.black), ui.Main.title_text)
 | 26.75 | 91 | 0.775701 |
ab5f699e53032e831c262248e628cd668fdd8bf9 | 3,407 | py | Python | src/m6_loops_within_loops_printing.py | wilfonba/24-FinalExamPractice-201930 | 4c339355e8b84a7dd99f70bda7cef08afb876f32 | ["MIT"] | stars: null | issues: null | forks: null
"""
PRACTICE Exam 3.
This problem provides practice at:
*** LOOPS WITHIN LOOPS in PRINTING-TO-CONSOLE problems. ***
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Ben Wilfong.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# Students:
#
# These problems have DIFFICULTY and TIME ratings:
# DIFFICULTY rating: 1 to 10, where:
# 1 is very easy
# 3 is an "easy" Test 2 question.
# 5 is a "typical" Test 2 question.
# 7 is a "hard" Test 2 question.
# 10 is an EXTREMELY hard problem (too hard for a Test 2 question)
#
# TIME ratings: A ROUGH estimate of the number of minutes that we
# would expect a well-prepared student to take on the problem.
#
# IMPORTANT: For ALL the problems in this module,
# if you reach the time estimate and are NOT close to a solution,
# STOP working on that problem and ASK YOUR INSTRUCTOR FOR HELP
# on it, in class or via Piazza.
###############################################################################
def main():
""" Calls the TEST functions in this module. """
run_test_shape()
def run_test_shape():
""" Tests the shape function. """
print()
print('--------------------------------------------------')
print('Testing the SHAPE function:')
print('--------------------------------------------------')
print()
print('Test 1 of shape: r=7')
shape(7)
print()
print('Test 2 of shape: r=4')
shape(4)
print()
print('Test 3 of shape: r=2')
shape(2)
def shape(r):
"""
Prints a shape with r rows that looks like this example where r=7:
+++++++!7654321
++++++!654321-
+++++!54321--
++++!4321---
+++!321----
++!21-----
+!1------
Another example, where r=4:
++++!4321
+++!321-
++!21--
+!1---
Preconditions: r is a positive number.
For purposes of "lining up", assume r is a single digit.
"""
# -------------------------------------------------------------------------
# Done: 2. Implement and test this function.
# Some tests are already written for you (above).
#
###########################################################################
# IMPLEMENTATION RESTRICTION:
# You may NOT use string multiplication in this problem.
###########################################################################
# -------------------------------------------------------------------------
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 7
# TIME ESTIMATE: 15 minutes.
# -------------------------------------------------------------------------
for k in range(r):
for j in range(k):
print(" ", end="")
for i in range(r - k):
print("+", end="")
print("!", end="")
        for q in range(r - k):
            print(r - q - k, end="")
        for w in range(k):
            print("-", end="")
        print()
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 31.546296 | 79 | 0.434693 |
aa5ae4ebf613bae2b6fb1abe366149e99d79d1b5 | 1,171 | py | Python | tensorflow_datasets/text/goemotions_test.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | ["Apache-2.0"] | stars: 2 (2022-02-14T09:51:39.000Z to 2022-02-14T13:27:49.000Z) | issues: null | forks: 1 (2020-12-13T22:11:33.000Z)
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""goemotions dataset."""
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.text import goemotions
class GoemotionsTest(tfds.testing.DatasetBuilderTestCase):
DATASET_CLASS = goemotions.Goemotions
SPLITS = {
'train': 2, # Number of fake train example
'validation': 1, # Number of fake validation example
'test': 1, # Number of fake test example
}
DL_EXTRACT_RESULT = {
'train': 'train.tsv',
'dev': 'dev.tsv',
'test': 'test.tsv',
}
if __name__ == '__main__':
tfds.testing.test_main()
| 30.815789 | 74 | 0.721605 |
a1f8dfa239baa33d5b7fb8a82ab8e95fa6b75853 | 611 | bzl | Python | source/bazel/deps/glog/get.bzl | luxe/unilang | 6c8a431bf61755f4f0534c6299bd13aaeba4b69e | ["MIT"] | stars: 33 (2019-05-30T07:43:32.000Z to 2021-12-30T13:12:32.000Z) | issues: 371 (2019-05-16T15:23:50.000Z to 2021-09-04T15:45:27.000Z) | forks: 6 (2019-08-22T17:37:36.000Z to 2020-11-07T07:15:32.000Z)
# Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def glog():
http_archive(
name="glog" ,
sha256="bae42ec37b50e156071f5b92d2ff09aa5ece56fd8c58d2175fc1ffea85137664" ,
strip_prefix="glog-0a2e5931bd5ff22fd3bf8999eb8ce776f159cda6" ,
urls = [
"https://github.com/Unilang/glog/archive/0a2e5931bd5ff22fd3bf8999eb8ce776f159cda6.tar.gz",
], repo_mapping = {
"@com_github_gflags_gflags" : "@gflags",
},
)
| 32.157895 | 102 | 0.677578 |
fba1ce0254050682784d9a667d8d59a7932e305b | 204 | py | Python | python/6kyu/Which are in?/solution.py | abfarah/codewars | 5aa41a36a6255cb559d23c62f75679e4f3818692 | ["MIT"] | stars: null | issues: null | forks: null
def in_array(array1, array2):
array1.sort()
result = []
for i in array1:
for j in array2:
if i in j and i not in result:
result.append(i)
return result
| 22.666667 | 42 | 0.534314 |
341e645f422d2f00ff39b0a2ebb6cf228a853a5f | 10,632 | py | Python | seq2ftr/feature_calculators.py | smoothnlp/Squeeze | 7b968cbc42871634360f2daa9b2c60499bda3bf6 | ["Apache-2.0"] | stars: 5 (2019-07-17T09:02:57.000Z to 2021-05-17T02:50:13.000Z) | issues: null | forks: 2 (2019-06-04T14:40:56.000Z to 2020-03-20T13:41:50.000Z)
import hashlib
from functools import wraps
def set_property(*args):
"""
The decorators for set properties for individual feature calculators
:param args:
name: the function's name for display
stype: the supporting types for the feature calculating function
0 for boolean, 1 for numericla, 2 for categorical
:return:
"""
def decorate_func(func):
for i in range(0,len(args),2):
setattr(func, args[i], args[i+1])
return func
return decorate_func
def listify_type(func):
"""
Decorator for casting input to list
:param func:
:return:
"""
@wraps(func)
def listify(*args):
x = args[0]
if not isinstance(x,list):
x = list(x)
return func(x)
return listify
def filter_none(func):
"""
filter None value in input list
:param func:
:return:
"""
@wraps(func)
def filterfunc(*args):
x = args[0]
x = [a for a in x if a is not None]
return func(x)
return filterfunc
##########################
## Supporting Funcitons ##
##########################
def _shift(x:list,n:int):
"""
    Left-rotate x by n positions; equivalent to np.roll(x, -n) for 0 <= n < len(x)
:param x:
:param n:
:return:
"""
return x[n:]+x[:n]
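# Illustrative behaviour (left rotation):
#
#     >>> _shift([1, 2, 3, 4], 1)
#     [2, 3, 4, 1]
#     >>> _shift([1, 2, 3, 4], 3)
#     [4, 1, 2, 3]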
def _sort(x:list):
    x_copy = x.copy()
    x_copy.sort()
    return x_copy
def _appearance_count(x:list):
    """
    get frequency count
    :param x:
    :return:
    """
    freq = {k:0 for k in set(x)}
    for xi in x:
        freq[xi]+=1
    return freq
def _token_hash(x):
    """
    get a stable hash for the input token
    :param x:
    :return:
    """
    if isinstance(x, str):
        # sum the digits of the md5 hex digest, then hash that sum
        x_md5 = hashlib.md5(x.encode("utf-8")).hexdigest()
        y = sum(int(v) for v in x_md5 if v.isdigit())
        x_hash = hash(y)
    else:
        x_hash = hash(x)
    return x_hash
def _z_transform(x:list):
xmean = _mean(x)
xstd = _std(x)
return [(xi-xmean)/xstd for xi in x]
#########################
## Feature Calculators ##
#########################
@set_property("name","mean","stypes",[0,1])
@listify_type
@filter_none
def _mean(x:list):
return sum(x)/len(x)
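# Illustration of how the decorator stack composes: a call passes through
# listify_type first (casting e.g. a tuple to a list), then filter_none
# (dropping None entries), then the raw implementation, while set_property
# attaches the display name and supported types:
#
#     >>> _mean((1, 2, None, 3))
#     2.0
#     >>> _mean.name, _mean.stypes
#     ('mean', [0, 1])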
@set_property("name","max","stypes",[0,1])
@listify_type
@filter_none
def _max(x:list):
return max(x)
@set_property("name","freq_of_max","stypes",[1])
@filter_none
def _freq_of_max(x:list):
max_x = _max(x)
return len([xi for xi in x if xi >=max_x])
@set_property("name","min","stypes",[0,1])
@listify_type
@filter_none
def _min(x:list):
return min(x)
@set_property("name","freq_of_min","stypes",[1])
@filter_none
def _freq_of_min(x:list):
min_x = _min(x)
return len([xi for xi in x if xi<=min_x])
@set_property("name",'median',"stypes",[1])
@listify_type
@filter_none
def _median(x:list):
x_sorted = _sort(x)
x_len = len(x_sorted)
if x_len%2==1:
return x_sorted[x_len//2]
else:
return x_sorted[x_len//2]*0.5 + x_sorted[x_len//2-1]*0.5
@set_property("name","median_mean_distance","stypes",[1])
@filter_none
def _median_mean_distance(x:list):
return abs(_mean(x)-_median(x))/(_max(x)-_min(x))
@set_property("name","percentage_below_mean","stypes",[1])
@filter_none
def _percentage_below_mean(x:list):
x_mean = _mean(x)
return len([xi for xi in x if xi<x_mean])/_len(x)
@set_property("name","variance","stypes",[1])
@filter_none
def _var(x:list):
avg = _mean(x)
return sum([(xi-avg)**2 for xi in x])/len(x)
@set_property("name","standard_deviation","stypes",[1])
@filter_none
def _std(x:list):
return _var(x)**(0.5)
@set_property("name","uniqueCount","stypes",[0,1,2])
@listify_type
@filter_none
def _uniqueCount(x:list):
return len(set(x))
@set_property("name","length","stypes",[0,1,2])
@listify_type
@filter_none
def _len(x:list):
return len(x)
@set_property("name","duplicates_count","stypes",[0,1,2])
@filter_none
def _num_duplicates(x:list):
    return _len(x) - _uniqueCount(x)
@set_property("name","flucturate_rate","stypes",[0,2])
@listify_type
@filter_none
def _flucturate_rate(x:list,shift=1):
    # fraction of adjacent (lag = shift) pairs whose values differ
    x_shifted = _shift(x,shift)
    flucturate_vec = [xi1!=xi2 for xi1,xi2 in zip(x[:-shift],x_shifted[:-shift])]
    return sum(flucturate_vec)/(len(x)-shift)
@set_property("name","percentage_of_most_reoccuring_value_to_all_values","stypes",[1,2])
@listify_type
@filter_none
def _percentage_of_most_reoccuring_value_to_all_values(x:list):
    # count of the most common value relative to the number of distinct values
    x_freq_count = _appearance_count(x)
    return max(x_freq_count.values())/len(x_freq_count)
@set_property("name","percentage_of_most_reoocuring_value_to_all_datapoints","stypes",[1,2])
@listify_type
@filter_none
def _percentage_of_most_reoocuring_value_to_all_datapoints(x:list):
x_freq_count = _appearance_count(x)
return max(x_freq_count.values()) / len(x)
@set_property("name","last_location_of_max","stypes",[1])
@filter_none
def _last_location_of_max(x:list):
xmax = _max(x)
for i in range(1,len(x)+1):
if x[-i] == xmax:
return i
@set_property("name","fist_location_of_max","stypes",[1])
@filter_none
def _first_location_of_max(x:list):
xmax = _max(x)
for i in range(len(x)):
if x[i] == xmax:
return i
@set_property("name","last_location_of_min","stypes",[1])
@filter_none
def _last_location_of_min(x:list):
xmin = _min(x)
for i in range(1,len(x)+1):
if x[-i] == xmin:
return i
@set_property("name","fist_location_of_min","stypes",[1])
@filter_none
def _first_location_of_min(x:list):
xmin = _min(x)
for i in range(len(x)):
if x[i] == xmin:
return i
@set_property("name","ratio_value_number_to_seq_length","stypes",[1,2])
@filter_none
def _ratio_value_number_to_seq_length(x:list):
return len(set(x))/_len(x)
@set_property("name","number_peaks_1","stypes",[1])
@listify_type
@filter_none
def _number_peaks(x,n=1):
counter = 0
for i in range(n,len(x)-n):
neighbors=[x[i-j] for j in range(1,n+1)] + [x[i+j] for j in range(1,n+1)]
if x[i] > max(neighbors):
counter+=1
return counter
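# Worked example: in [0, 4, 0, 5, 0] with n=1 both 4 and 5 exceed their
# immediate neighbours, so the feature value is 2.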
@set_property("name","number_peaks_2","stypes",[1])
@filter_none
def _number_peaks2(x:list):
return _number_peaks(x,2)
@set_property("name","number_peaks_3","stypes",[1])
@filter_none
def _number_peaks3(x:list):
return _number_peaks(x,3)
@set_property("name", "skewness", "stypes", [1])
@listify_type
@filter_none
def _skewness(x:list):
avg = _mean(x)
adjusted = [v - avg for v in x]
count = len(x)
adjusted2 = [pow(v,2) for v in adjusted]
adjusted3 = [adjusted2[i] * adjusted[i] for i in range(len(adjusted))]
m2 = sum(adjusted2)
m3 = sum(adjusted3)
if count<3:
return None
else:
if m2 == 0:
return 0
else:
result = (count * (count -1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)
return round(result, 6)
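# Note: this is the adjusted Fisher-Pearson sample skewness,
# G1 = sqrt(n*(n-1))/(n-2) * (m3/n)/(m2/n)**1.5 with m2, m3 the central moment
# sums above; it should agree with pandas' Series.skew() (an assumption based
# on the formula, not verified against pandas here).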
@set_property("name","kurtosis", "stypes", [1])
@listify_type
@filter_none
def _kurtosis(x:list):
avg = _mean(x)
count = len(x)
adjusted = [v - avg for v in x]
adjusted2 = [pow(v,2) for v in adjusted]
adjusted4 = [pow(v,2) for v in adjusted2]
m2 = sum(adjusted2)
m4 = sum(adjusted4)
if count<4:
return None
else:
adj = 3 * (count -1) ** 2 / ((count -2) * (count-3))
numer = count * (count + 1) * (count - 1) * m4
denom = (count - 2) * (count - 3) * m2 ** 2
if denom == 0:
return 0
else:
return round(numer/denom - adj, 6)
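# Note: this is the bias-adjusted sample excess kurtosis (the G2 statistic,
# defined for n >= 4); it should agree with pandas' Series.kurt()
# (an assumption based on the formula, not verified against pandas here).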
@set_property("name","abs_energy","stypes",[1])
@listify_type
@filter_none
def _abs_energy(x:list):
"""
.. math::
E = \\sum_{i=1,\ldots, n} x_i^2
:param x:
:return:
"""
return sum([xi**2 for xi in x])
@set_property("name","cid_ce","stypes",[1])
@filter_none
def _cid_ce(x:list):
    """
    .. math::
        \\sqrt{ \\sum_{i=0}^{n-2} ( x_{i} - x_{i+1})^2 }
    .. rubric:: References
    |  [1] Batista, Gustavo EAPA, et al (2014).
    |  CID: an efficient complexity-invariant distance for time series.
    :param x:
    :return:
    """
    # z-normalize, then take the energy of the first differences
    # (the differencing step is what the docstring formula requires)
    x_z = _z_transform(x)
    x_diff = [xj - xi for xi, xj in zip(x_z[:-1], x_z[1:])]
    return _abs_energy(x_diff)**(0.5)
@set_property("name","mean_change","stypes",[1])
@filter_none
def _mean_change(x):
x_rolled = _shift(x,1)
x_diff = [xi-xj for xi,xj in zip(x,x_rolled)]
return _mean(x_diff)
@set_property("name","min_change","stypes",[1])
@filter_none
def _mean_change(x):
x_rolled = _shift(x,1)
x_diff = [xi-xj for xi,xj in zip(x,x_rolled)]
return _min(x_diff)
@set_property("name","_ndex_mass_quantile_25","stypes",[1])
@filter_none
def _index_mass_quantile(x:list,percentile = 0.25):
quantile_value = sum(x)*percentile
xlen = len(x)
cumsum = 0;
sign_flag = (quantile_value>=0)
for i,xval in enumerate(x):
cumsum+=xval
if (sign_flag and cumsum>quantile_value) or ((not sign_flag) and cumsum<quantile_value):
return i/xlen
return 0
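# Worked example: for x = [1, 1, 1, 1] and percentile 0.25 the running sum
# passes 25% of the total mass (1.0) at index 1, so the feature is 1/4 = 0.25,
# i.e. the relative position where a quarter of the series' mass is reached.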
@set_property("name","ndex_mass_quantile_50","stypes",[1])
@filter_none
def _idnex_mass_quantile_50(x):
return _index_mass_quantile(x,0.5)
@set_property("name","ndex_mass_quantile_75","stypes",[1])
@filter_none
def _idnex_mass_quantile_75(x):
return _index_mass_quantile(x,0.75)
@set_property("name","categorical_max_freq_key_hash_code", "stypes", [2])
@listify_type
@filter_none
def _categorical_max_freq_key_hash_code(x:list):
x_freq_count = _appearance_count(x)
x_max_freq_key = [xkey for xkey,xval in x_freq_count.items() if xval == max(x_freq_count.values())][0]
# x_freq_count_sort = sorted(x_freq_count.items(), key=lambda d: d[1],reverse=True)
# x_max_freq_keyax_freq_key = x_freq_count_sort[0][0]
return _token_hash(x_max_freq_key)
@set_property("name","categorical_min_freq_key_hash_code", "stypes", [2])
@listify_type
@filter_none
def _categorical_min_freq_key_hash_code(x:list):
x_freq_count = _appearance_count(x)
    x_min_freq_key = [xkey for xkey,xval in x_freq_count.items() if xval == min(x_freq_count.values())][0]
return _token_hash(x_min_freq_key)
@set_property("name","none_rate", "stypes",[0,1,2])
@listify_type
def _none_rate(x:list):
return len([a for a in x if a is not None])/len(x)
#### TODO #####
"""
fft_coefficient
Calculates the Fourier coefficients of the one-dimensional discrete Fourier
Transform for real input via the fast Fourier transform algorithm
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0,
\\ldots , n-1.
"""
"""
entropy
"""
"""
Linear model coefficients
""" | 26.058824 | 108 | 0.6345 |
6e5d27660a10559829ba18ce8300267a29167e44 | 1,423 | py | Python | HW1_Random/codes/run_mlp.py | liuyuezhangadam/artificial-neural-network | 36fcd9625633eeb8eb0a924274dd5a8564ffe6e0 | [
"MIT"
] | 1 | 2021-04-26T05:42:22.000Z | 2021-04-26T05:42:22.000Z | HW1_Random/codes/run_mlp.py | liuyuezhangadam/artificial-neural-network | 36fcd9625633eeb8eb0a924274dd5a8564ffe6e0 | [
"MIT"
] | null | null | null | HW1_Random/codes/run_mlp.py | liuyuezhangadam/artificial-neural-network | 36fcd9625633eeb8eb0a924274dd5a8564ffe6e0 | [
"MIT"
] | null | null | null | from network import Network
from utils import LOG_INFO
from layers import Relu, Sigmoid, Linear
from loss import EuclideanLoss
from solve_net import train_net, test_net
from load_data import load_mnist_2d
train_data, test_data, train_label, test_label = load_mnist_2d('data')
# Your model definition here
# You should explore different model architecture
model = Network()
model.add(Linear('fc1', 784, 512, 0.01))
model.add(Relu('relu1'))
model.add(Linear('fc2', 512, 512, 0.01))
model.add(Relu('relu2'))
model.add(Linear('fc3', 512, 10, 0.01))
loss = EuclideanLoss(name='loss')
# Training configuration
# You should adjust these hyperparameters
# NOTE: one iteration means model forward-backwards one batch of samples.
# one epoch means model has gone through all the training samples.
# 'disp_freq' denotes number of iterations in one epoch to display information.
config = {
'learning_rate': 0.01,
'weight_decay': 5e-4,
'momentum': 0.9,
'batch_size': 100,
'max_epoch': 100,
'disp_freq': 50,
'test_epoch': 5
}
for epoch in range(config['max_epoch']):
LOG_INFO('Training @ %d epoch...' % (epoch))
train_net(model, loss, config, train_data, train_label, config['batch_size'], config['disp_freq'])
if epoch % config['test_epoch'] == 0:
LOG_INFO('Testing @ %d epoch...' % (epoch))
test_net(model, loss, test_data, test_label, config['batch_size'])
| 30.276596 | 102 | 0.706254 |
3716295cfd6e10c0d83ad524ef9e1fcb8b4affee | 4,225 | py | Python | appdaemon/archived_apps/lighting/alarm_clock.py | ConnorGriffin/Home-AssistantConfig | 73eac5fc7f0283f5953d31a10fea6e1a4f7c99d6 | [
"MIT"
] | null | null | null | appdaemon/archived_apps/lighting/alarm_clock.py | ConnorGriffin/Home-AssistantConfig | 73eac5fc7f0283f5953d31a10fea6e1a4f7c99d6 | [
"MIT"
] | null | null | null | appdaemon/archived_apps/lighting/alarm_clock.py | ConnorGriffin/Home-AssistantConfig | 73eac5fc7f0283f5953d31a10fea6e1a4f7c99d6 | [
"MIT"
] | null | null | null | import appdaemon.plugins.hass.hassapi as hass
import datetime
class AlarmClock(hass.Hass):
def initialize(self):
# Get the alarm clock groups in the alarm clock view, setup listeners for each
alarm_groups = self.get_state(
'group.alarmclock_view', attribute='entity_id')
for alarm in alarm_groups:
alarm_name = alarm.split('.')[1]
            # Every minute, evaluate whether the alarm should trigger
self.run_minutely(
self.alarm_triggered_cb,
# .now() is broken, github issue #878
(datetime.datetime.now() + datetime.timedelta(seconds=1)).time(),
constrain_input_boolean='input_boolean.{}_enabled'.format(
alarm_name),
alarm_name=alarm_name
)
# Update the alarm group name instantly if the alarm is renamed
self.listen_state(
self.alarm_renamed_cb,
entity='input_text.{}_name'.format(alarm_name),
alarm_name=alarm_name
)
# Update the alarm group name every 5 minutes (for reboots, group reload, etc)
self.run_every(
self.scheduled_rename_cb,
# .now() is broken, github issue #878
datetime.datetime.now() + datetime.timedelta(seconds=3),
interval=300,
alarm_name=alarm_name,
)
def rename_alarm(self, alarm_name):
# Get the alarm's friendly name (group name) and input name (Name textbox)
input_name = self.get_state('input_text.{}_name'.format(alarm_name))
friendly_name = self.friendly_name('group.{}'.format(alarm_name))
# If the two names don't match, set the friendly name to the input name
if input_name != friendly_name:
self.call_service(
service='group/set',
object_id=alarm_name,
name=input_name
)
def trigger_alarm(self, kwargs):
alarm_name = kwargs.get('alarm_name')
# Notify HomeAssistant that this alarm has triggered. Other AppDaemon apps can subscribe to this event to take action on the alarm
self.log('Fired alarm {}.'.format(alarm_name))
self.fire_event(
"alarm_fired",
alarm_name=alarm_name
)
def alarm_renamed_cb(self, entity, attribute, old, new, kwargs):
alarm_name = kwargs.get('alarm_name')
self.rename_alarm(alarm_name)
def scheduled_rename_cb(self, kwargs):
alarm_name = kwargs.get('alarm_name')
self.rename_alarm(alarm_name)
def alarm_triggered_cb(self, kwargs):
alarm_name = kwargs.get('alarm_name')
# Get the alarm properties
time = self.get_state('input_datetime.{}_datetime'.format(alarm_name))
alarm_seconds = (int(time.split(
':')[0]) * 60 + int(time.split(':')[1])) * 60 + int(time.split(':')[2])
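        # e.g. "07:30:00" -> (7 * 60 + 30) * 60 + 0 = 27000 seconds past midnight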
# Get the total seconds since midnight, used to calculate alarm triggered time
now = datetime.datetime.now()
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
now_seconds = (now - midnight).seconds
# If the alarm time is before the current time, add 24 hours. This allows the script to run at 23:59 to trigger an alarm at 00:00
if alarm_seconds < now_seconds:
alarm_seconds += 86400
alarm_day = (now + datetime.timedelta(days=1)
).strftime("%A").lower()
else:
alarm_day = now.strftime("%A").lower()
# Get the alarm day selector boolean for the current day (or whatever day the alarm is supposed to fire in)
alarm_day_enabled = self.get_state(
'input_boolean.{}_{}'.format(alarm_name, alarm_day))
# Determine if the alarm is set to go off within 60 seconds
diff_seconds = alarm_seconds - now_seconds
if diff_seconds > 0 and diff_seconds <= 60 and alarm_day_enabled == 'on':
# Fire the alarm
self.run_in(
self.trigger_alarm,
delay=diff_seconds,
alarm_name=alarm_name,
)
| 40.238095 | 138 | 0.60071 |
4e87dc8f04b1f3f5e5ee4d16ca0b553fe4f69117 | 3,875 | py | Python | arthur.carvalho/snake/snake.py | LUDUSLab/stem-games | 347afa8b1511d76f8070fa69f27a49b57e551376 | [
"MIT"
] | 2 | 2021-01-24T01:04:34.000Z | 2021-05-06T16:25:53.000Z | arthur.carvalho/snake/snake.py | LUDUSLab/stem-games | 347afa8b1511d76f8070fa69f27a49b57e551376 | [
"MIT"
] | null | null | null | arthur.carvalho/snake/snake.py | LUDUSLab/stem-games | 347afa8b1511d76f8070fa69f27a49b57e551376 | [
"MIT"
] | 3 | 2021-01-26T21:35:43.000Z | 2021-05-06T16:06:47.000Z | import pygame
from random import randint
def body_snake_move():
    global score, apple_position, score_text
    if not (UP or DOWN or LEFT or RIGHT):
        return
    # Shift every body segment toward the head BEFORE moving the head itself;
    # copying after the head has moved would leave the first segment
    # permanently overlapping the head.
    for c in range(len(snake_position) - 1, 0, -1):
        snake_position[c] = (snake_position[c - 1][0], snake_position[c - 1][1])
    if UP:
        snake_position[0] = (snake_position[0][0], snake_position[0][1] - 10)
    elif DOWN:
        snake_position[0] = (snake_position[0][0], snake_position[0][1] + 10)
    elif LEFT:
        snake_position[0] = (snake_position[0][0] - 10, snake_position[0][1])
    elif RIGHT:
        snake_position[0] = (snake_position[0][0] + 10, snake_position[0][1])

    # apple coordinates: on a hit, respawn the apple, grow the snake and update the score
    if snake_position[0] == apple_position:
        eating_sound.play()
        apple_position = (randint(0, 490) // 10 * 10, randint(0, 490) // 10 * 10)
        snake_position.append((500, 500))
        score += 1
        score_text = score_font.render(f'Score: {score}', True, (255, 255, 255), (0, 0, 0))
pygame.init()
# Screen configuration
screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption('Snake')
# setting snake
snake = pygame.Surface((10, 10))
snake.fill((0, 255, 0))
snake_position = [(100, 250), (90, 250), (80, 250)]
UP = DOWN = LEFT = RIGHT = False
# setting apple
apple = pygame.Surface((10, 10))
apple.fill((255, 0, 0))
apple_position = (400, 250)
# score
score_font = pygame.font.Font('assets/PressStart2P.ttf', 20)
score_text = score_font.render('Score: 0', True, (255, 255, 255), (0, 0, 0))
score = 0
# game over text
game_over_font = pygame.font.Font('assets/PressStart2P.ttf', 40)
game_over_text = game_over_font.render('Game Over', True, (255, 255, 255), (0, 0, 0))
# Sounds
eating_sound = pygame.mixer.Sound('assets/412068__inspectorj__chewing-carrot-a.wav')
game_over_sound = pygame.mixer.Sound('assets/533034__evretro__8-bit-game-over-sound-tune.wav')
play_sound = 0
game_loop = True
game_clock = pygame.time.Clock()
while game_loop:
game_clock.tick(20)
# map events
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
# map keys
elif event.type == pygame.KEYDOWN:
if not DOWN:
if event.key == pygame.K_UP:
UP = True
DOWN = LEFT = RIGHT = False
if not UP:
if event.key == pygame.K_DOWN:
DOWN = True
UP = LEFT = RIGHT = False
if not RIGHT:
if event.key == pygame.K_LEFT:
LEFT = True
UP = DOWN = RIGHT = False
if not LEFT:
if event.key == pygame.K_RIGHT:
RIGHT = True
UP = DOWN = LEFT = False
if (snake_position[0][0] < 0) or (snake_position[0][1] < 0) or (snake_position[0][0] > 490) or \
(snake_position[0][1] > 490):
if play_sound == 0:
            game_over_sound.play()
play_sound += 1
screen.fill((0, 0, 0))
        screen.blit(game_over_text, (80, 150))
screen.blit(score_text, (150, 250))
pygame.display.update()
else:
# Snake moves
body_snake_move()
# clear e draw
screen.fill((0, 0, 0))
screen.blit(score_text, (150, 0))
screen.blit(apple, apple_position)
for position in snake_position:
screen.blit(snake, position)
pygame.display.update()
| 29.356061 | 100 | 0.577032 |
2780627c3964a271009a1ef08b6e6fa323ec5745 | 2,075 | py | Python | flox_aws/command.py | getflox/flox-aws | 9642e291afa3ddedd9eed566b43a640e9bb0c537 | [
"MIT"
] | null | null | null | flox_aws/command.py | getflox/flox-aws | 9642e291afa3ddedd9eed566b43a640e9bb0c537 | [
"MIT"
] | null | null | null | flox_aws/command.py | getflox/flox-aws | 9642e291afa3ddedd9eed566b43a640e9bb0c537 | [
"MIT"
] | null | null | null | import json
from urllib.parse import quote_plus
import click
import requests
from flox_aws.provider.session import with_aws
from floxcore.context import Flox, EmptyContext
from floxcore.shell import execute_command
@click.group(name="aws", invoke_without_command=True, context_settings=dict(
ignore_unknown_options=True,
))
@click.argument('args', nargs=-1, type=click.UNPROCESSED)
@click.pass_obj
def aws(flox: Flox, args, **kwargs):
"""awscli command wrapper with session credentials provider"""
command_name = next(iter(args), None)
if command_name == "console":
args = list(args)
with EmptyContext(flox, console, args[1:], allow_interspersed_args=True, ignore_unknown_options=True) as ctx:
return console.invoke(ctx)
variables = flox.security_context(["aws"])
execute_command("aws", args, variables)
@aws.command()
@click.option("--show", is_flag=True, default=False, help="Only show login URL rather than opening a browser")
@with_aws
def console(aws_session, show):
"""Generate login URL for AWS console"""
credentials = aws_session.get_credentials()
json_string_with_temp_credentials = json.dumps(dict(
sessionId=credentials.access_key,
sessionKey=credentials.secret_key,
sessionToken=credentials.token,
))
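    # Step 1: exchange the temporary session credentials for a federation
    # sign-in token (AWS federation endpoint, Action=getSigninToken)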
request_parameters = "?Action=getSigninToken"
request_parameters += "&SessionDuration=43200"
request_parameters += "&Session=" + quote_plus(json_string_with_temp_credentials)
request_url = "https://signin.aws.amazon.com/federation" + request_parameters
r = requests.get(request_url)
signin_token = json.loads(r.text)
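    # Step 2: build a console login URL that embeds the sign-in token
    # (Action=login); opening it signs the browser into the AWS console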
request_parameters = "?Action=login"
request_parameters += "&Issuer=flox"
request_parameters += "&Destination=" + quote_plus("https://console.aws.amazon.com/")
request_parameters += "&SigninToken=" + signin_token["SigninToken"]
request_url = "https://signin.aws.amazon.com/federation" + request_parameters
if show:
click.echo(request_url)
else:
click.launch(request_url)
| 34.016393 | 117 | 0.727229 |
e66e26d6d6ba237ae12c619c84760e660a6ecea3 | 3,399 | py | Python | python/cuML/test/test_linear_model.py | akshaysubr/cuml | 7fceac26242f0155a5fa5cf1951af29230302e31 | [
"Apache-2.0"
] | 1 | 2019-10-01T15:20:32.000Z | 2019-10-01T15:20:32.000Z | python/cuML/test/test_linear_model.py | akshaysubr/cuml | 7fceac26242f0155a5fa5cf1951af29230302e31 | [
"Apache-2.0"
] | null | null | null | python/cuML/test/test_linear_model.py | akshaysubr/cuml | 7fceac26242f0155a5fa5cf1951af29230302e31 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml import LinearRegression as cuLinearRegression
from cuml import Ridge as cuRidge
from sklearn.linear_model import LinearRegression as skLinearRegression
from sklearn.linear_model import Ridge as skRidge
from utils import array_equal
import cudf
import numpy as np
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('X_type', ['dataframe', 'ndarray'])
@pytest.mark.parametrize('y_type', ['series', 'ndarray'])
@pytest.mark.parametrize('algorithm', ['eig', 'svd'])
def test_ols(datatype, X_type, y_type, algorithm):
X = np.array([[2.0, 5.0], [6.0, 9.0], [2.0, 2.0], [2.0, 3.0]],
dtype=datatype)
y = np.dot(X, np.array([5.0, 10.0]).astype(datatype))
pred_data = np.array([[3.0, 5.0], [2.0, 5.0]]).astype(datatype)
skols = skLinearRegression(fit_intercept=True,
normalize=False)
skols.fit(X, y)
cuols = cuLinearRegression(fit_intercept=True,
normalize=False,
algorithm=algorithm)
if X_type == 'dataframe':
gdf = cudf.DataFrame()
gdf['0'] = np.asarray([2, 6, 2, 2], dtype=datatype)
gdf['1'] = np.asarray([5, 9, 2, 3], dtype=datatype)
cuols.fit(gdf, y)
elif X_type == 'ndarray':
cuols.fit(X, y)
sk_predict = skols.predict(pred_data)
cu_predict = cuols.predict(pred_data).to_array()
print(sk_predict)
print(cu_predict)
# print(skols.coef_)
print(cuols.gdf_datatype)
print(y.dtype)
assert array_equal(sk_predict, cu_predict, 1e-3, with_sign=True)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('X_type', ['dataframe', 'ndarray'])
@pytest.mark.parametrize('y_type', ['series', 'ndarray'])
@pytest.mark.parametrize('algorithm', ['eig', 'svd'])
def test_ridge(datatype, X_type, y_type, algorithm):
X = np.array([[2.0, 5.0], [6.0, 9.0], [2.0, 2.0], [2.0, 3.0]],
dtype=datatype)
y = np.dot(X, np.array([5.0, 10.0]).astype(datatype))
pred_data = np.array([[3.0, 5.0], [2.0, 5.0]]).astype(datatype)
skridge = skRidge(fit_intercept=False,
normalize=False)
skridge.fit(X, y)
curidge = cuRidge(fit_intercept=False,
normalize=False,
solver=algorithm)
if X_type == 'dataframe':
gdf = cudf.DataFrame()
gdf['0'] = np.asarray([2, 6, 2, 2], dtype=datatype)
gdf['1'] = np.asarray([5, 9, 2, 3], dtype=datatype)
curidge.fit(gdf, y)
elif X_type == 'ndarray':
curidge.fit(X, y)
sk_predict = skridge.predict(pred_data)
cu_predict = curidge.predict(pred_data).to_array()
assert array_equal(sk_predict, cu_predict, 1e-3, with_sign=True)
| 33.99 | 74 | 0.640482 |
742c513ae4ab8d4f273cb7e5ea3bb70dd3e55e1b | 8,217 | py | Python | lingua/builder.py | pemistahl/lingua-py | d8e99d24a235ae80c40d57788d6f4d7ba806cb92 | [
"Apache-2.0"
] | 119 | 2022-01-10T20:49:05.000Z | 2022-03-26T12:42:26.000Z | lingua/builder.py | pemistahl/lingua-py | d8e99d24a235ae80c40d57788d6f4d7ba806cb92 | [
"Apache-2.0"
] | 7 | 2022-01-22T18:35:49.000Z | 2022-03-24T20:36:32.000Z | lingua/builder.py | pemistahl/lingua-py | d8e99d24a235ae80c40d57788d6f4d7ba806cb92 | [
"Apache-2.0"
] | 3 | 2022-01-10T23:35:30.000Z | 2022-01-11T11:00:33.000Z | #
# Copyright © 2022 Peter M. Stahl pemistahl@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import FrozenSet, Iterable
from .detector import LanguageDetector
from .isocode import IsoCode639_1, IsoCode639_3
from .language import Language
_MISSING_LANGUAGE_MESSAGE: str = (
"LanguageDetector needs at least 2 languages to choose from"
)
class LanguageDetectorBuilder:
"""This class configures and creates an instance of LanguageDetector."""
def __init__(self, languages: FrozenSet[Language]):
self._languages = languages
self._minimum_relative_distance = 0.0
self._is_every_language_model_preloaded = False
def __repr__(self):
languages = sorted([language.name for language in self._languages])
return (
"LanguageDetectorBuilder("
f"_languages={languages}, "
f"_minimum_relative_distance={self._minimum_relative_distance}, "
f"_is_every_language_model_preloaded={self._is_every_language_model_preloaded})"
)
@classmethod
def from_all_languages(cls) -> "LanguageDetectorBuilder":
"""Create and return an instance of LanguageDetectorBuilder
with all built-in languages.
"""
return cls._from(Language.all())
@classmethod
def from_all_spoken_languages(cls) -> "LanguageDetectorBuilder":
"""Create and return an instance of LanguageDetectorBuilder
with all built-in spoken languages.
"""
return cls._from(Language.all_spoken_ones())
@classmethod
def from_all_languages_with_arabic_script(cls) -> "LanguageDetectorBuilder":
"""Create and return an instance of LanguageDetectorBuilder
with all built-in languages supporting the Arabic script.
"""
return cls._from(Language.all_with_arabic_script())
@classmethod
def from_all_languages_with_cyrillic_script(cls) -> "LanguageDetectorBuilder":
"""Create and return an instance of LanguageDetectorBuilder
with all built-in languages supporting the Cyrillic script.
"""
return cls._from(Language.all_with_cyrillic_script())
@classmethod
def from_all_languages_with_devanagari_script(cls) -> "LanguageDetectorBuilder":
"""Create and return an instance of LanguageDetectorBuilder
with all built-in languages supporting the Devanagari script.
"""
return cls._from(Language.all_with_devanagari_script())
@classmethod
def from_all_languages_with_latin_script(cls) -> "LanguageDetectorBuilder":
"""Create and return an instance of LanguageDetectorBuilder
with all built-in languages supporting the Latin script.
"""
return cls._from(Language.all_with_latin_script())
@classmethod
def from_all_languages_without(
cls, *languages: Language
) -> "LanguageDetectorBuilder":
"""Create and return an instance of LanguageDetectorBuilder
with all built-in languages except those passed to this method.
"""
languages_to_load = Language.all().difference(languages)
if len(languages_to_load) < 2:
raise ValueError(_MISSING_LANGUAGE_MESSAGE)
return cls._from(languages_to_load)
@classmethod
def from_languages(cls, *languages: Language) -> "LanguageDetectorBuilder":
"""Create and return an instance of LanguageDetectorBuilder
with the languages passed to this method.
"""
if len(languages) < 2:
raise ValueError(_MISSING_LANGUAGE_MESSAGE)
return cls._from(languages)
@classmethod
def from_iso_codes_639_1(
cls, *iso_codes: IsoCode639_1
) -> "LanguageDetectorBuilder":
"""Create and return an instance of LanguageDetectorBuilder
with the languages specified by the ISO 639-1 codes passed
to this method.
Raises:
ValueError: if less than two ISO codes are specified
"""
if len(iso_codes) < 2:
raise ValueError(_MISSING_LANGUAGE_MESSAGE)
languages = set()
for iso_code in iso_codes:
language = Language.from_iso_code_639_1(iso_code)
languages.add(language)
return cls._from(languages)
@classmethod
def from_iso_codes_639_3(
cls, *iso_codes: IsoCode639_3
) -> "LanguageDetectorBuilder":
"""Create and return an instance of LanguageDetectorBuilder
with the languages specified by the ISO 639-3 codes passed
to this method.
Raises:
ValueError: if less than two ISO codes are specified
"""
if len(iso_codes) < 2:
raise ValueError(_MISSING_LANGUAGE_MESSAGE)
languages = set()
for iso_code in iso_codes:
language = Language.from_iso_code_639_3(iso_code)
languages.add(language)
return cls._from(languages)
def with_minimum_relative_distance(
self, distance: float
) -> "LanguageDetectorBuilder":
"""Set the desired value for the minimum relative distance measure.
By default, Lingua returns the most likely language for a given
input text. However, there are certain words that are spelled the
same in more than one language. The word 'prologue', for instance,
is both a valid English and French word. Lingua would output either
English or French which might be wrong in the given context.
For cases like that, it is possible to specify a minimum relative
distance that the logarithmized and summed up probabilities for
each possible language have to satisfy.
Be aware that the distance between the language probabilities is
dependent on the length of the input text. The longer the input
text, the larger the distance between the languages. So if you
want to classify very short text phrases, do not set the minimum
relative distance too high. Otherwise you will get most results
returned as None which is the return value for cases where
language detection is not reliably possible.
Raises:
ValueError: if distance is smaller than 0.0 or greater than 0.99
"""
        if not 0.0 <= distance <= 0.99:
            raise ValueError(
                "Minimum relative distance must lie in between 0.0 and 0.99"
            )
self._minimum_relative_distance = distance
return self
def with_preloaded_language_models(self) -> "LanguageDetectorBuilder":
"""Preload all language models when creating the LanguageDetector
instance.
By default, Lingua uses lazy-loading to load only those language
models on demand which are considered relevant by the rule-based
filter engine. For web services, for instance, it is rather
beneficial to preload all language models into memory to avoid
unexpected latency while waiting for the service response. This
method allows to switch between these two loading modes.
"""
self._is_every_language_model_preloaded = True
return self
def build(self) -> LanguageDetector:
"""Create and return the configured LanguageDetector instance."""
return LanguageDetector._from(
self._languages,
self._minimum_relative_distance,
self._is_every_language_model_preloaded,
)
@classmethod
def _from(cls, languages: Iterable[Language]) -> "LanguageDetectorBuilder":
if not isinstance(languages, frozenset):
return LanguageDetectorBuilder(frozenset(languages))
return LanguageDetectorBuilder(languages)
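# Minimal usage sketch (illustrative, not part of the library; assumes the
# ENGLISH and FRENCH Language members exist and their models are installed):
#
#     detector = (LanguageDetectorBuilder
#                 .from_languages(Language.ENGLISH, Language.FRENCH)
#                 .with_minimum_relative_distance(0.25)
#                 .with_preloaded_language_models()
#                 .build())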
| 40.279412 | 92 | 0.690885 |
e6e8e6f7ae1b8e3713b7a5787388ccd40fbb5b6b | 11,670 | py | Python | Testing/tests/test_car_manager_04.py | MNikov/Python-OOP-October-2020 | a53e4555758ec810605e31e7b2c71b65c49b2332 | [
"MIT"
] | null | null | null | Testing/tests/test_car_manager_04.py | MNikov/Python-OOP-October-2020 | a53e4555758ec810605e31e7b2c71b65c49b2332 | [
"MIT"
] | null | null | null | Testing/tests/test_car_manager_04.py | MNikov/Python-OOP-October-2020 | a53e4555758ec810605e31e7b2c71b65c49b2332 | [
"MIT"
] | null | null | null | import unittest
from Testing.code_files.car_manager_04 import Car
class TestCar(unittest.TestCase):
def test_carInit_shouldCreateProperObject(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
expected = [make, model, fuel_consumption, fuel_capacity, 0]
c = Car(make, model, fuel_consumption, fuel_capacity)
self.assertListEqual(expected, [c.make, c.model, fuel_consumption, fuel_capacity, c.fuel_amount])
# --- ADDED FROM DONCHO BUT ARE NOT TESTED IN JUDGE---
def test_carInit_whenNoneMake_shouldRaise(self):
make = None
model = 'test model'
fuel_consumption = 6
fuel_capacity = 60
with self.assertRaises(Exception) as context:
Car(make, model, fuel_consumption, fuel_capacity)
self.assertIsNotNone(context.exception)
def test_carInit_whenEmptyStringMake_shouldRaise(self):
make = ''
model = 'test model'
fuel_consumption = 6
fuel_capacity = 60
with self.assertRaises(Exception) as context:
Car(make, model, fuel_consumption, fuel_capacity)
self.assertIsNotNone(context.exception)
def test_carInit_whenNoneModel_shouldRaise(self):
make = 'test make'
model = None
fuel_consumption = 6
fuel_capacity = 60
with self.assertRaises(Exception) as context:
Car(make, model, fuel_consumption, fuel_capacity)
self.assertIsNotNone(context.exception)
def test_carInit_whenEmptyStringModel_shouldRaise(self):
make = 'test make'
model = ''
fuel_consumption = 6
fuel_capacity = 60
with self.assertRaises(Exception) as context:
Car(make, model, fuel_consumption, fuel_capacity)
self.assertIsNotNone(context.exception)
def test_carInit_whenNegativeFuelConsumption_shouldRaise(self):
make = 'test make'
model = 'test model'
fuel_consumption = -1
fuel_capacity = 60
with self.assertRaises(Exception) as context:
Car(make, model, fuel_consumption, fuel_capacity)
self.assertIsNotNone(context.exception)
def test_carInit_whenZeroFuelConsumption_shouldRaise(self):
make = 'test make'
model = 'test model'
fuel_consumption = 0
fuel_capacity = 60
with self.assertRaises(Exception) as context:
Car(make, model, fuel_consumption, fuel_capacity)
self.assertIsNotNone(context.exception)
def test_carInit_whenNegativeFuelCapacity_shouldRaise(self):
make = 'test make'
model = 'test model'
fuel_consumption = 6
fuel_capacity = -1
with self.assertRaises(Exception) as context:
Car(make, model, fuel_consumption, fuel_capacity)
self.assertIsNotNone(context.exception)
def test_carInit_whenZeroFuelCapacity_shouldRaise(self):
make = 'test make'
model = 'test model'
fuel_consumption = 6
fuel_capacity = 0
with self.assertRaises(Exception) as context:
Car(make, model, fuel_consumption, fuel_capacity)
self.assertIsNotNone(context.exception)
def test_carInit_whenNegativeFuelAmount_shouldRaise(self):
make = 'test make'
model = 'test model'
fuel_consumption = 6
fuel_capacity = 60
params = [make, model, fuel_consumption, fuel_capacity]
car = Car(*params)
with self.assertRaises(Exception) as context:
car.fuel_amount = -1
self.assertIsNotNone(context.exception)
# --- ADDED FROM DONCHO END ---
# SETTERS ARE NOT TESTED BY DONCHO, DID IT ANYWAY
def test_carMakeSetter_whenValidValue_shouldChangeMake(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
c.make = 'new test make'
self.assertEqual('new test make', c.make)
def test_carMakeSetter_whenValueIsNone_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.make = None
self.assertIsNotNone(context.exception)
def test_carMakeSetter_whenValueIsEmtpyString_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.make = ''
self.assertIsNotNone(context.exception)
def test_carModelSetter_whenValidValue_shouldChangeModel(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
c.model = 'new test model'
self.assertEqual('new test model', c.model)
def test_carModelSetter_whenValueIsNone_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.model = None
self.assertIsNotNone(context.exception)
def test_carModelSetter_whenValueIsEmtpyString_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.model = ''
self.assertIsNotNone(context.exception)
def test_carFuelConsumptionSetter_whenValidValue_shouldChangeConsumption(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
c.fuel_consumption = 12
self.assertEqual(12, c.fuel_consumption)
def test_carFuelConsumptionSetter_whenValidIsZero_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.fuel_consumption = 0
self.assertIsNotNone(context.exception)
def test_carFuelConsumptionSetter_whenValidIsNegative_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.fuel_consumption = -1
self.assertIsNotNone(context.exception)
def test_carFuelCapacitySetter_whenValidValue_shouldChangeCapacity(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
c.fuel_capacity = 110
self.assertEqual(110, c.fuel_capacity)
def test_carFuelCapacitySetter_whenValidIsZero_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.fuel_capacity = 0
self.assertIsNotNone(context.exception)
def test_carFuelCapacitySetter_whenValidIsNegative_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.fuel_capacity = -1
self.assertIsNotNone(context.exception)
def test_carFuelAmountSetter_whenValidValue_shouldChangeAmount(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
c.fuel_amount = 50
self.assertEqual(50, c.fuel_amount)
    def test_carFuelAmountSetter_whenNegativeValue_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.fuel_amount = -1
self.assertIsNotNone(context.exception)
# ----------
def test_carRefuel_whenFuelIsValidAndLessThanCapacity_shouldIncreaseAmount(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
c.refuel(50)
self.assertEqual(50, c.fuel_amount)
def test_carRefuel_whenFuelIsValidAndMoreThanCapacity_shouldIncreaseAmount(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
c.refuel(101)
self.assertEqual(fuel_capacity, c.fuel_amount)
def test_carRefuel_whenNegativeFuel_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.refuel(-1)
self.assertIsNotNone(context.exception)
def test_carRefuel_whenZeroFuel_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.refuel(0)
self.assertIsNotNone(context.exception)
def test_carDrive_whenEnoughFuel_shouldDecreaseAmount(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
        c.fuel_amount = 100
        distance = 50
        needed = (distance / 100) * c.fuel_consumption
        c.drive(distance)
        self.assertEqual(fuel_capacity - needed, c.fuel_amount)
# FROM DONCHO
# make = 'test make'
# model = 'test model'
# fuel_consumption = 6
# fuel_capacity = 60
#
# params = [make, model, fuel_consumption, fuel_capacity]
# car = Car(*params)
#
# car.refuel(car.fuel_capacity)
# distance = 100
# car.drive(distance)
# expected = car.fuel_capacity - car.fuel_consumption * distance / 100
# actual = car.fuel_amount
#
# self.assertEqual(expected, actual)
def test_carDrive_whenNotEnoughFuel_shouldRaiseException(self):
make = 'test make'
model = 'test model'
fuel_consumption = 10
fuel_capacity = 100
c = Car(make, model, fuel_consumption, fuel_capacity)
with self.assertRaises(Exception) as context:
c.drive(1000)
self.assertIsNotNone(context.exception)
if __name__ == '__main__':
unittest.main()
| 35.363636 | 105 | 0.651671 |
90f586accc47773baade36e445f9924643f78d99 | 4,142 | py | Python | samples/interactive-tutorials/product/import_products_gcs.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | 1 | 2022-02-11T14:00:31.000Z | 2022-02-11T14:00:31.000Z | samples/interactive-tutorials/product/import_products_gcs.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | null | null | null | samples/interactive-tutorials/product/import_products_gcs.py | tetiana-karasova/python-retail | b834c1fb16212e59241267e18d38b490e962af7f | [
"Apache-2.0"
] | 2 | 2022-01-28T09:53:16.000Z | 2022-02-07T14:27:38.000Z | # Copyright 2022 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
def main(bucket_name):
# [START retail_import_products_from_gcs]
import time
import google.auth
from google.cloud.retail import (
GcsSource,
ImportErrorsConfig,
ImportProductsRequest,
ProductInputConfig,
ProductServiceClient,
)
project_id = google.auth.default()[1]
# TODO: Developer set the bucket_name
# bucket_name = os.environ["BUCKET_NAME"]
# You can change the branch here. The "default_branch" is set to point to the branch "0"
default_catalog = f"projects/{project_id}/locations/global/catalogs/default_catalog/branches/default_branch"
gcs_bucket = f"gs://{bucket_name}"
gcs_errors_bucket = f"{gcs_bucket}/error"
gcs_products_object = "products.json"
# TO CHECK ERROR HANDLING USE THE JSON WITH INVALID PRODUCT
# gcs_products_object = "products_some_invalid.json"
def get_import_products_gcs_request(gcs_object_name: str):
# TO CHECK ERROR HANDLING PASTE THE INVALID CATALOG NAME HERE:
# default_catalog = "invalid_catalog_name"
gcs_source = GcsSource()
gcs_source.input_uris = [f"{gcs_bucket}/{gcs_object_name}"]
input_config = ProductInputConfig()
input_config.gcs_source = gcs_source
print("GRS source:")
print(gcs_source.input_uris)
errors_config = ImportErrorsConfig()
errors_config.gcs_prefix = gcs_errors_bucket
import_request = ImportProductsRequest()
import_request.parent = default_catalog
import_request.reconciliation_mode = (
ImportProductsRequest.ReconciliationMode.INCREMENTAL
)
import_request.input_config = input_config
import_request.errors_config = errors_config
print("---import products from google cloud source request---")
print(import_request)
return import_request
# call the Retail API to import products
def import_products_from_gcs():
import_gcs_request = get_import_products_gcs_request(gcs_products_object)
gcs_operation = ProductServiceClient().import_products(import_gcs_request)
print("---the operation was started:----")
print(gcs_operation.operation.name)
while not gcs_operation.done():
print("---please wait till operation is done---")
time.sleep(30)
print("---import products operation is done---")
if gcs_operation.metadata is not None:
print("---number of successfully imported products---")
print(gcs_operation.metadata.success_count)
print("---number of failures during the importing---")
print(gcs_operation.metadata.failure_count)
else:
print("---operation.metadata is empty---")
if gcs_operation.result is not None:
print("---operation result:---")
print(gcs_operation.result())
else:
print("---operation.result is empty---")
# The imported products needs to be indexed in the catalog before they become available for search.
print(
"Wait 2-5 minutes till products become indexed in the catalog, after that they will be available for search"
)
import_products_from_gcs()
# [END retail_import_products_from_gcs]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("bucket_name", nargs="?", default=os.environ["BUCKET_NAME"])
args = parser.parse_args()
main(args.bucket_name)
| 35.401709 | 120 | 0.688073 |
df5ded4541fd116b029ee98075516725aca021ac | 2,635 | py | Python | data/fivethirtyeight/election-forecasts-2020/presidential-ev-probabilities-2020/harvester.py | mtna/data-public | aeeee98d60e545440bab18356120fb4493d0a35b | [
"Apache-2.0",
"CC-BY-4.0"
] | 2 | 2020-08-28T21:36:11.000Z | 2021-05-05T16:34:52.000Z | data/fivethirtyeight/election-forecasts-2020/presidential-ev-probabilities-2020/harvester.py | mtna/data-public | aeeee98d60e545440bab18356120fb4493d0a35b | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | data/fivethirtyeight/election-forecasts-2020/presidential-ev-probabilities-2020/harvester.py | mtna/data-public | aeeee98d60e545440bab18356120fb4493d0a35b | [
"Apache-2.0",
"CC-BY-4.0"
] | 2 | 2020-10-20T00:45:11.000Z | 2020-10-20T00:47:16.000Z | import os
import pandas as pd
variables = {
'cycle': 'election_cycle',
'branch': 'race_type',
'modeldate': 'date_modeled',
'candidate_inc': 'name_incumbent',
'candidate_chal': 'name_challenger',
'candidate_3rd': 'name_third',
'evprob_inc': 'votes_ec_prob_incumbent',
'evprob_chal': 'votes_ec_prob_challenger',
'evprob_3rd': 'votes_ec_prob_third',
'total_ev': 'votes_ec_total',
'timestamp': 'simulation_timestamp',
'simulations': 'simulation_count'
}
def clean(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(variables, axis="columns", inplace=True)
# Reformat dates
df['date_modeled'] = pd.to_datetime(df['date_modeled'])
# Code model type
df['model'] = df['model'].map({'polls-only': '0', 'polls-plus': '1'})
    # reorder the columns into a fixed layout
df = df[['election_cycle', 'race_type', 'name_incumbent', 'name_challenger', 'name_third', 'votes_ec_prob_incumbent',
'votes_ec_prob_challenger', 'votes_ec_prob_third', 'votes_ec_total', 'model', 'date_modeled', 'simulation_timestamp', 'simulation_count']]
# order the records by date
df = df.sort_values(by=['date_modeled'], ascending=False)
return df
if __name__ == "__main__":
path = os.path
# Loop over the files within the raw folder
for filename in sorted(os.listdir('./data/fivethirtyeight/election-forecasts-2020/presidential-ev-probabilities-2020/raw')):
if filename.endswith('.csv') and path.exists(f'./data/fivethirtyeight/election-forecasts-2020/presidential-ev-probabilities-2020/{filename}') == False:
print(filename)
# For each csv file, map the transformed data to its respective file in the harvested folder
data = pd.read_csv(
f"./data/fivethirtyeight/election-forecasts-2020/presidential-ev-probabilities-2020/raw/{filename}", float_precision='round_trip')
df = clean(data)
df.to_csv(
f"./data/fivethirtyeight/election-forecasts-2020/presidential-ev-probabilities-2020/clean/{filename}", index=False)
# write to the latest file (clear and rewrite)
if path.exists(f'./data/fivethirtyeight/election-forecasts-2020/presidential-ev-probabilities-2020/latest.csv'):
open('./data/fivethirtyeight/election-forecasts-2020/presidential-ev-probabilities-2020/latest.csv', 'w').close()
df.to_csv(
f"./data/fivethirtyeight/election-forecasts-2020/presidential-ev-probabilities-2020/latest.csv", index=False)
| 43.916667 | 159 | 0.679696 |
3cffc92f3a50e761b3aad209579d21f6fa17d53d | 6,296 | py | Python | mflat_master.py | ambarqadeer/ALOPqadeer | 8bcef157d6fc11c4ee19a58b3fe77915a82c947c | [
"MIT"
] | null | null | null | mflat_master.py | ambarqadeer/ALOPqadeer | 8bcef157d6fc11c4ee19a58b3fe77915a82c947c | [
"MIT"
] | null | null | null | mflat_master.py | ambarqadeer/ALOPqadeer | 8bcef157d6fc11c4ee19a58b3fe77915a82c947c | [
"MIT"
] | null | null | null | import glob
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
from astropy.nddata import CCDData
import astropy.units as u
import ccdproc as ccdp
import os
import pathlib
from ccdproc import ImageFileCollection
from astropy.visualization import hist
import itertools
from astropy.stats import sigma_clip, mad_std
import time
import sys
def create_directories():
if not os.path.exists('Trimmed_Flat'):
os.makedirs('Trimmed_Flat')
print('created directory Trimmed_Flat')
if not os.path.exists('Master_Files'):
os.makedirs('Master_Files')
print('created directory Master_Files')
if not os.path.exists('Trimmed_Flat/subflatsmed'):
os.makedirs('Trimmed_Flat/subflatsmed')
print('created directory Trimmed_Flat/subflatsmed')
if not os.path.exists('Trimmed_Flat/subflatssig'):
os.makedirs('Trimmed_Flat/subflatssig')
print('created directory Trimmed_Flat/subflatssig')
def trim_flat(refresh='2'):
flatcollection = ImageFileCollection('HD115709/flat_SII', ext=4)
flag = 0
tflatpathlist = []
if refresh == '1':
for ccdf, flatn in flatcollection.ccds(return_fname=True, ccd_kwargs={'unit': 'adu'}):
if flag == 0:
print('all flats will be trimmed to :', ccdf.meta['trimsec'])
flag = 1
print('trimming', flatn)
tflat = ccdp.trim_image(ccdf, fits_section=str(ccdf.meta['trimsec']))
tflat.meta['imtype'] = ('trimmed flat', 'type of image')
tflat.meta['taxis1'] = (2048, 'dimension1')
tflat.meta['taxis2'] = (4096, 'dimension2')
tflat.write('Trimmed_Flat/' + flatn[0:8] + '_trim.fits', overwrite=True)
tflatpathlist.append('Trimmed_Flat/' + flatn[0:8] + '_trim.fits')
print('created', len(tflatpathlist), 'trimmed flats')
elif refresh == '2':
try:
tflatcollection = ImageFileCollection('Trimmed_Flat')
tflatpathlist = tflatcollection.files_filtered(imtype='trimmed flat', include_path=True)
print('found', len(tflatpathlist), 'trimmed flats')
except:
print('can\'t locate trimmed flats, create or check directory')
sys.exit(0)
return flatcollection, tflatpathlist
def sub_bias(refresh='2', bias='2'):
tflatcollection = ImageFileCollection('Trimmed_Flat')
if bias == '1':
biaspath = 'Master_Files/mbias_median.fits'
dest = 'Trimmed_Flat/subflatsmed/'
elif bias == '2':
biaspath = 'Master_Files/mbias.fits'
dest = 'Trimmed_Flat/subflatssig/'
if refresh == '1':
subflatpathlist = []
mbias = CCDData.read(biaspath, unit='adu')
for ccdf, flatn in tflatcollection.ccds(imtype='trimmed flat', return_fname=True):
subflat = ccdp.subtract_bias(ccdf, mbias, add_keyword='subbias')
subflat.meta['imtype'] = ('subflat', 'bias subtracted flat')
subflat.write(dest + flatn[0:8] + '_subbias.fits',overwrite=True)
subflatpathlist.append(dest + flatn[0:8] + '_subbias.fits')
else:
try:
subflatcollection = ImageFileCollection(dest)
subflatpathlist = subflatcollection.files_filtered(imtype='subflat', include_path=True)
print('found', len(subflatpathlist), 'subflats')
except:
print('can\'t locate subflats, create or check directory')
sys.exit()
return tflatcollection, subflatpathlist
# create directories to save files
create_directories()
# trim flat files, no refresh returns existing path list
print('do you want to trim the flats? (1. Yes / 2. Read existing files)\n')
tfref = input()
flatcollection, tflatpathlist = trim_flat(tfref)
# subtract bias from flats
refresh = input('do you want to subtract bias from flats? (1. Yes / 2. Read existing files)\n')
bias = input('select which bias to use (1. Median / 2. Sigma clipped average): \n')
tflatcollection, subflatpathlist = sub_bias(refresh, bias)
def combine_flats(refresh='2', method='2'):
if method == '1':
meta = 'med'
source = 'Trimmed_Flat/subflatsmed'
dest = 'Master_Files/mflat_median.fits'
elif method == '2':
meta = 'sig'
source = 'Trimmed_Flat/subflatssig'
dest = 'Master_Files/mflat.fits'
subflatcollection = ImageFileCollection(source)
combtime = 0
if refresh == '1':
print('found', len(subflatcollection.values('file')), 'subflats')
start = time.time()
if method == '1':
mflat = ccdp.combine(subflatcollection.files_filtered(
imtype='subflat', include_path=True),
method='median')
mflat.meta['flatcom'] = 'median'
combtime = time.time() - start
print('combination took', combtime, 'seconds')
elif method == '2':
mflat = ccdp.combine(subflatcollection.files_filtered(imtype='subflat', include_path=True),
sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,
sigma_clip_func=np.nanmedian, sigma_clip_dev_func=mad_std)
mflat.meta['flatcom'] = 'sigma'
combtime = time.time() - start
print('combination took', combtime, 'seconds')
mflat.meta['normmed'] = (np.nanmedian(mflat), 'nanmedian of the master flat')
mflat.meta['subflats'] = meta
mflat.write(dest[0:-5]+'_'+meta+'.fits', overwrite=True)
else:
try:
if method == '1':
mflat = CCDData.read('Master_Files/mflat_median_med.fits', unit='adu')
elif method == '2':
mflat = CCDData.read('Master_Files/mflat_sig.fits', unit='adu')
except:
print('can\'t locate master flat, create or check directory')
sys.exit()
return subflatcollection, mflat, dest, combtime
# combine subflat files to form master flat, metadata contains norm
print('do you want to create master flat again? (1. Yes / 2. Read existing files)\n')
mbref = input()
print('select combination method? (1. median / 2. sigma clipped average)\n')
method = input()
subflatcollection, mflat, mflatpath, combtime = combine_flats(mbref, method) | 41.150327 | 103 | 0.640089 |
ff189cfa3dc4b8f42f5322578b28e1fa9d6abcf8 | 1,136 | py | Python | src/setup.py | spaceone-dev/plugin-googleoauth2-identity-auth | e30e2032aa68a7f3f70fb8afbd1e39aa30d259f9 | [
"Apache-2.0"
] | null | null | null | src/setup.py | spaceone-dev/plugin-googleoauth2-identity-auth | e30e2032aa68a7f3f70fb8afbd1e39aa30d259f9 | [
"Apache-2.0"
] | null | null | null | src/setup.py | spaceone-dev/plugin-googleoauth2-identity-auth | e30e2032aa68a7f3f70fb8afbd1e39aa30d259f9 | [
"Apache-2.0"
] | 1 | 2021-08-19T02:13:02.000Z | 2021-08-19T02:13:02.000Z | #
# Copyright 2020 The SpaceONE Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
with open('VERSION', 'r') as f:
VERSION = f.read().strip()
f.close()
setup(
name='plugin-google-oauth2',
version=VERSION,
description='Google OAuth2 Plugin',
long_description='',
url='https://www.spaceone.dev/',
author='MEGAZONE SpaceONE Team',
author_email='admin@spaceone.dev',
license='Apache License 2.0',
packages=find_packages(),
install_requires=[
'spaceone-core',
'spaceone-api'
],
zip_safe=False,
)
| 29.128205 | 76 | 0.690141 |
a51c5fde566de3aa2e39e5861de8b7d66975e3e9 | 14,091 | py | Python | core/gossip/messages/connect_message.py | NunoEdgarGFlowHub/sawtooth-core | 0288e5fae62b7b730201cb57b06f996a8b02ec33 | [
"Apache-2.0"
] | null | null | null | core/gossip/messages/connect_message.py | NunoEdgarGFlowHub/sawtooth-core | 0288e5fae62b7b730201cb57b06f996a8b02ec33 | [
"Apache-2.0"
] | null | null | null | core/gossip/messages/connect_message.py | NunoEdgarGFlowHub/sawtooth-core | 0288e5fae62b7b730201cb57b06f996a8b02ec33 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
This module implements classes derived from Message for representing
connection requests, connection replies, disconnection requests, and
keep alives. It also defines handler methods to be called when these
message types arrive.
"""
import logging
from gossip import common, message, node
logger = logging.getLogger(__name__)
def send_connection_request(gossiper, peer):
"""Sends a connection request (syn message) to a candidate peer node.
Args:
gossiper (Node): The local node.
peer (Node): The remote node.
"""
logger.info("add node %s, %s, %s", peer, peer.Identifier[:8],
peer.NetAddress)
gossiper.add_node(peer)
request = ConnectSynMessage()
request.NetHost = gossiper.LocalNode.endpoint_host
request.NetPort = gossiper.LocalNode.endpoint_port
request.Name = gossiper.LocalNode.Name
gossiper.send_message(request, peer.Identifier)
def register_message_handlers(gossiper):
"""Registers the connection-related message handlers for a node.
Args:
gossiper (Node): The node to register message handlers on.
"""
gossiper.register_message_handler(ConnectSynMessage,
connect_syn_handler)
gossiper.register_message_handler(ConnectAckMessage,
connect_ack_handler)
gossiper.register_message_handler(ConnectSynAckMessage,
connect_syn_ack_handler)
gossiper.register_message_handler(DisconnectRequestMessage,
disconnect_request_handler)
gossiper.register_message_handler(KeepAliveMessage, keep_alive_handler)
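# Handshake sketch (inferred from the handlers registered above):
#
#     initiator                    target
#     ConnectSyn     ------------>
#                    <------------ ConnectAck
#     ConnectSynAck  ------------>
#
# Only after the full syn/ack/syn-ack exchange can each side be confident
# that the other can hear it.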
class ConnectSynMessage(message.Message):
"""Connection syn messages are sent to a candidate peer node to initiate a
gossip connection.
Attributes:
ConnectSynMessage.MessageType (str): The class name of the message.
Reliable (bool): Whether or not the message requires reliable
delivery.
NetHost (str): Hostname or IP address identifying the node.
NetPort (int): The remote port number to connect to.
Name (str): The name of the connection.
IsSystemMessage (bool): Whether or not this is a system message.
System messages have special delivery priority rules.
IsForward (bool): Whether the message should be automatically
forwarded.
IsReliable (bool): Whether reliable delivery is required.
"""
MessageType = "/gossip.messages.ConnectMessage/ConnectSyn"
def __init__(self, minfo=None):
"""Constructor for the ConnectSynMessage class.
Args:
minfo (dict): Dictionary of values for message fields.
"""
if minfo is None:
minfo = {}
super(ConnectSynMessage, self).__init__(minfo)
self.Reliable = False
self.NetHost = minfo.get('Host', "127.0.0.1")
self.NetPort = minfo.get('Port', 0)
self.Name = minfo.get('Name')
self.IsSystemMessage = True
self.IsForward = False
self.IsReliable = True
@property
def NetAddress(self):
"""Returns the host and port of the connection request message.
Returns:
ordered pair: (host, port).
"""
return (self.NetHost, self.NetPort)
def dump(self):
"""Dumps a dict containing object attributes.
Returns:
dict: A mapping of object attribute names to values.
"""
result = super(ConnectSynMessage, self).dump()
result['Host'] = self.NetHost
result['Port'] = self.NetPort
result['Name'] = self.Name
return result
def connect_syn_handler(msg, gossiper):
"""Handles connection syn events.
When a connection syn message arrives, the requesting node is added
as a node and an ack message is sent. We do not yet know whether the
requester can hear us, so we'll not set the node to a peer just yet.
Args:
msg (message.Message): The received connection request message.
gossiper (Node): The local node.
"""
if msg.OriginatorID in gossiper.blacklist:
logger.warning('msg originator %s blacklisted', msg.OriginatorID)
return
if msg.SenderID != msg.OriginatorID:
logger.error('connection request must originate from peer; %s not %s',
msg.OriginatorID, msg.SenderID)
return
name = msg.Name
if not name:
name = msg.OriginatorID[:8]
orignode = node.Node(address=msg.NetAddress,
identifier=msg.OriginatorID,
name=name)
gossiper.add_node(orignode)
reply = ConnectAckMessage()
reply.InReplyTo = msg.Identifier
gossiper.send_message(reply, msg.OriginatorID)
class ConnectAckMessage(message.Message):
"""Connection ack messages are sent to a candidate peer node in response to
an incoming connection syn message.
Attributes:
ConnectAckMessage.MessageType (str): The class name of the message.
InReplyTo (str): The node identifier of the originator of the
connection request message.
IsSystemMessage (bool): Whether or not this is a system message.
System messages have special delivery priority rules.
IsForward (bool): Whether the message should be automatically
forwarded.
IsReliable (bool): Whether reliable delivery is required.
"""
MessageType = "/gossip.messages.ConnectMessage/ConnectAck"
def __init__(self, minfo=None):
"""Constructor for the ConnectAckMessage class.
Args:
minfo (dict): Dictionary of values for message fields.
"""
if minfo is None:
minfo = {}
super(ConnectAckMessage, self).__init__(minfo)
self.InReplyTo = minfo.get('InReplyTo', common.NullIdentifier)
self.IsSystemMessage = True
self.IsForward = False
self.IsReliable = True
def dump(self):
"""Dumps a dict containing object attributes.
Returns:
dict: A mapping of object attribute names to values.
"""
result = super(ConnectAckMessage, self).dump()
return result
def connect_ack_handler(msg, gossiper):
"""Handles connection ack events.
When a connection ack message arrives, the local node can reason that 2-way
communication with the replying node is (or was momentarily) possible, so
it promotes the replying node from node to peer, and sends its new peer a
syn_ack message.
Args:
msg (message.Message): The received connection reply message.
gossiper (Node): The local node.
"""
if msg.OriginatorID in gossiper.blacklist:
logger.warning('msg originator %s blacklisted', msg.OriginatorID)
return
logger.info('received connect confirmation from node %s',
gossiper.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8]))
# we have confirmation that this peer is currently up, so add it to our
# list
if msg.OriginatorID in gossiper.NodeMap:
gossiper.NodeMap[msg.OriginatorID].is_peer = True
# send syn_ack back to new peer to demonstrate 2-way communication line
reply = ConnectSynAckMessage()
reply.InReplyTo = msg.Identifier
gossiper.send_message(reply, msg.OriginatorID)
class ConnectSynAckMessage(message.Message):
"""Connection syn_ack messages are sent to a peer node in response to
an incoming connection ack message, in order to allow the remote node to
verify 2-way communication with the local node.
Attributes:
        ConnectSynAckMessage.MessageType (str): The class name of the message.
InReplyTo (str): The node identifier of the originator of the
connection request message.
IsSystemMessage (bool): Whether or not this is a system message.
System messages have special delivery priority rules.
IsForward (bool): Whether the message should be automatically
forwarded.
IsReliable (bool): Whether reliable delivery is required.
"""
MessageType = "/gossip.messages.ConnectMessage/ConnectSynAck"
def __init__(self, minfo=None):
if minfo is None:
minfo = {}
super(ConnectSynAckMessage, self).__init__(minfo)
self.InReplyTo = minfo.get('InReplyTo', common.NullIdentifier)
self.IsSystemMessage = True
self.IsForward = False
self.IsReliable = True
def dump(self):
result = super(ConnectSynAckMessage, self).dump()
return result
def connect_syn_ack_handler(msg, gossiper):
"""Handles connection syn_ack events.
When a connection syn_ack message arrives, the local node can reason that
2-way communication with the replying node is (or was momentarily)
possible, so it promotes the replying node from node to peer, completing
the three way handshake connection protocol.
Args:
msg (message.Message): The received connection reply message.
gossiper (Node): The local node.
"""
if msg.OriginatorID in gossiper.blacklist:
logger.warning('msg originator %s blacklisted', msg.OriginatorID)
return
if msg.SenderID != msg.OriginatorID:
logger.error('connection request must originate from peer; %s not %s',
msg.OriginatorID, msg.SenderID)
return
logger.info('received connect syn_ack from node %s',
gossiper.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8]))
# add peer now that we know 2-directional communication is possible
if msg.OriginatorID in gossiper.NodeMap:
gossiper.NodeMap[msg.OriginatorID].is_peer = True
class DisconnectRequestMessage(message.Message):
"""Disconnection request messages represent a request from a node
to disconnect from the gossip network.
Attributes:
DisconnectRequestMessage.MessageType (str): The class name of the
message.
Reliable (bool): Whether or not the message requires reliable
delivery.
IsSystemMessage (bool): Whether or not this is a system message.
System messages have special delivery priority rules.
IsForward (bool): Whether the message should be automatically
forwarded.
IsReliable (bool): Whether reliable delivery is required.
"""
MessageType = "/ConnectMessage/DisconnectRequest"
def __init__(self, minfo=None):
"""Constructor for the DisconnectRequestMessage class.
Args:
minfo (dict): Dictionary of values for message fields.
"""
if minfo is None:
minfo = {}
super(DisconnectRequestMessage, self).__init__(minfo)
self.Reliable = False
self.IsSystemMessage = True
self.IsForward = False
self.IsReliable = False
def dump(self):
"""Dumps a dict containing object attributes.
Returns:
dict: A mapping of object attribute names to values.
"""
return super(DisconnectRequestMessage, self).dump()
def disconnect_request_handler(msg, gossiper):
"""Handles disconnection request events.
When a disconnection request message arrives, the replying node is
removed as a peer.
Args:
msg (message.Message): The received disconnection request message.
gossiper (Node): The local node.
"""
    logger.warning('received disconnect message from node %s',
                   gossiper.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8]))
# if this node is one of our peers, then drop it
if msg.OriginatorID in gossiper.NodeMap:
        logger.warning('mark peer node %s as disabled',
                       gossiper.NodeMap[msg.OriginatorID])
gossiper.drop_node(msg.OriginatorID)
class KeepAliveMessage(message.Message):
"""Keep alive messages represent a request from a node to keep the
connection alive.
Attributes:
KeepAliveMessage.MessageType (str): The class name of the message.
Reliable (bool): Whether or not the message requires reliable
delivery.
IsSystemMessage (bool): Whether or not this is a system message.
System messages have special delivery priority rules.
IsForward (bool): Whether the message should be automatically
forwarded.
IsReliable (bool): Whether reliable delivery is required.
"""
MessageType = "/gossip.messages.ConnectMessage/KeepAlive"
def __init__(self, minfo=None):
"""Constructor for the KeepAliveMessage class.
Args:
minfo (dict): Dictionary of values for message fields.
"""
if minfo is None:
minfo = {}
super(KeepAliveMessage, self).__init__(minfo)
self.Reliable = False
self.IsSystemMessage = True
self.IsForward = False
self.IsReliable = False
def dump(self):
"""Dumps a dict containing object attributes.
Returns:
dict: A mapping of object attribute names to values.
"""
return super(KeepAliveMessage, self).dump()
def keep_alive_handler(msg, gossiper):
"""Handles keep alive events.
Args:
        msg (message.Message): The received keep alive message.
gossiper (Node): The local node.
"""
pass
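# A hedged sketch (editor's addition) of the three-way handshake implemented
# above; `gossiper` and `candidate` are hypothetical Node objects wired up by
# the surrounding gossip framework, so this is illustrative only:
#
#   register_message_handlers(gossiper)
#   send_connection_request(gossiper, candidate)   # -> ConnectSynMessage
#   # candidate's connect_syn_handler replies with ConnectAckMessage;
#   # our connect_ack_handler marks candidate as a peer and sends
#   # ConnectSynAckMessage, whose handler completes the handshake remotely.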
| 35.2275 | 80 | 0.662338 |
f4d1b65f1f117ac820fb2f0835226b6d1dbaf3ee | 2,116 | py | Python | theano/tests/unittest_tools.py | josharian/Theano | 724a25692090fee26eebf72f5d046ca8662089c1 | [
"BSD-3-Clause"
] | 1 | 2016-05-07T14:52:38.000Z | 2016-05-07T14:52:38.000Z | theano/tests/unittest_tools.py | josharian/Theano | 724a25692090fee26eebf72f5d046ca8662089c1 | [
"BSD-3-Clause"
] | null | null | null | theano/tests/unittest_tools.py | josharian/Theano | 724a25692090fee26eebf72f5d046ca8662089c1 | [
"BSD-3-Clause"
] | null | null | null | import sys
import numpy
import theano.tensor as T
from theano.configparser import config, AddConfigVar, StrParam
AddConfigVar('unittests.rseed',
"Seed to use for randomized unit tests. Special value 'random' means using a seed of None.",
StrParam(666),
in_c_key=False)
def fetch_seed(pseed=None):
"""
Returns the seed to use for running the unit tests.
If an explicit seed is given, it will be used for seeding numpy's rng.
    If not, it will use config.unittests.rseed (its default value is 666).
    If config.unittests.rseed is set to "random", it will seed the rng with None,
which is equivalent to seeding with a random seed.
Useful for seeding RandomState objects.
>>> rng = numpy.random.RandomState(unittest_tools.fetch_seed())
"""
seed = pseed or config.unittests.rseed
    if seed == 'random':
seed = None
try:
if seed:
seed = int(seed)
else:
seed = None
except ValueError:
print >> sys.stderr, 'Error: config.unittests.rseed contains '\
'invalid seed, using None instead'
seed = None
return seed
def seed_rng(pseed=None):
"""
Seeds numpy's random number generator with the value returned by fetch_seed.
Usage: unittest_tools.seed_rng()
"""
seed = fetch_seed(pseed)
    if pseed and pseed != seed:
        print >> sys.stderr, 'Warning: using seed given by config.unittests.rseed=%i '\
                             'instead of seed %i given as parameter' % (seed, pseed)
numpy.random.seed(seed)
return seed
def verify_grad(op, pt, n_tests=2, rng=None, *args, **kwargs):
"""
Wrapper for tensor/basic.py:verify_grad
Takes care of seeding the random number generator if None is given
"""
if rng is None:
seed_rng()
rng = numpy.random
T.verify_grad(op, pt, n_tests, rng, *args, **kwargs)
#
# This supports the following syntax:
#
# try:
# verify_grad(...)
# except verify_grad.E_grad, e:
# print e.num_grad.gf
# print e.analytic_grad
# raise
#
verify_grad.E_grad = T.verify_grad.E_grad
| 28.213333 | 100 | 0.650756 |
6630e3b6856f80f0a2d291ddd85e625d42148607 | 1,505 | py | Python | src/numdifftools/testing.py | peendebak/numdifftools | 10ed8f36f7d1995dd229b3e8fec77c12cf09817d | [
"BSD-3-Clause"
] | 181 | 2015-01-21T07:16:45.000Z | 2022-03-14T20:35:10.000Z | src/numdifftools/testing.py | peendebak/numdifftools | 10ed8f36f7d1995dd229b3e8fec77c12cf09817d | [
"BSD-3-Clause"
] | 58 | 2015-01-21T11:46:55.000Z | 2022-02-21T21:22:31.000Z | src/numdifftools/testing.py | peendebak/numdifftools | 10ed8f36f7d1995dd229b3e8fec77c12cf09817d | [
"BSD-3-Clause"
] | 43 | 2015-02-22T18:03:23.000Z | 2022-02-21T15:48:42.000Z | """
Created on Apr 4, 2016
@author: pab
"""
from __future__ import print_function
import sys
import contextlib
import inspect
import numpy as np
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
def rosen(x):
"""Rosenbrock function
This is a non-convex function used as a performance test problem for
optimization algorithms introduced by Howard H. Rosenbrock in 1960.[1]
"""
x = np.atleast_1d(x)
return (1 - x[0])**2 + 105. * (x[1] - x[0]**2)**2
def test_docstrings(name=''):
# np.set_printoptions(precision=6)
import doctest
if not name:
name = inspect.stack()[1][1]
print('Testing docstrings in {}'.format(name))
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE |
doctest.ELLIPSIS)
@contextlib.contextmanager
def capture_stdout_and_stderr():
"""
Capture sys.stdout and sys.stderr
Examples
--------
>>> from numdifftools.testing import capture_stdout_and_stderr
>>> with capture_stdout_and_stderr() as out:
... print('This is a test')
>>> out[0].startswith('This is a test')
True
>>> out[1] == ''
True
"""
old_out = sys.stdout, sys.stderr
out = [StringIO(), StringIO()]
try:
sys.stdout, sys.stderr = out
yield out
finally:
sys.stdout, sys.stderr = old_out
out[0] = out[0].getvalue()
out[1] = out[1].getvalue()
if __name__ == '__main__':
test_docstrings(__file__)
| 23.153846 | 74 | 0.634551 |
b1c5f001468ae5899a287d19087e6ff0b7bd87b6 | 603 | py | Python | examples/utilities_general.py | Sandbergo/ecole | 2bab4d6a66e5d1932870f4cecbdc989b8fd17546 | [
"BSD-3-Clause"
] | null | null | null | examples/utilities_general.py | Sandbergo/ecole | 2bab4d6a66e5d1932870f4cecbdc989b8fd17546 | [
"BSD-3-Clause"
] | null | null | null | examples/utilities_general.py | Sandbergo/ecole | 2bab4d6a66e5d1932870f4cecbdc989b8fd17546 | [
"BSD-3-Clause"
] | null | null | null | import datetime
class Logger:
    def __init__(self, filename=None):
        # only create a log file when a filename is given; otherwise log to stdout only
        self.logfile = f'{filename}_{self.format_time(file=True)}.txt' if filename else None
def format_time(self, file=False):
t = datetime.datetime.now()
if file:
s = t.strftime('%m%d_%H%M%S')
else:
s = t.strftime('%m-%d %H:%M:%S.%f')
s = s[:-4]
return s
    def __call__(self, msg: str):
        # renamed from `str` to avoid shadowing the builtin; behaviour is unchanged
        msg = f'[{self.format_time()}] {msg}'
        print(msg)
        if self.logfile is not None:
            with open(self.logfile, mode='a') as f:
                print(msg, file=f)
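# A short usage sketch (editor's addition). With no filename the logger writes
# to stdout only; pass a name to also append to a timestamped .txt file:
if __name__ == '__main__':
    log = Logger()              # stdout only (no log file is created)
    log('pipeline started')     # prints e.g. "[05-20 17:06:44.12] pipeline started"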
| 26.217391 | 70 | 0.515755 |
ffb99c534bd52163e5b8408b452617d5ccb94c84 | 1,477 | py | Python | utils/validations.py | jesseinit/feather-insure | 69222d9302662cbcd85b402c27b7f6951d2f18a0 | [
"MIT"
] | null | null | null | utils/validations.py | jesseinit/feather-insure | 69222d9302662cbcd85b402c27b7f6951d2f18a0 | [
"MIT"
] | null | null | null | utils/validations.py | jesseinit/feather-insure | 69222d9302662cbcd85b402c27b7f6951d2f18a0 | [
"MIT"
] | null | null | null | from functools import wraps
class ValidationError(Exception):
"""Base Validation class for handling validation errors"""
def __init__(self, error, status_code=None):
super().__init__(self)
self.status_code = 400
self.error = error
self.error["status"] = "error"
self.error["message"] = error["message"]
if status_code:
self.status_code = status_code
def to_dict(self):
return self.error
def validate_json_request(request):
"""Decorator function to check for json content type in request"""
def decorator(func):
@wraps(func)
def decorated_function(*args, **kwargs):
if (
not request.data.decode("utf-8")
or not request.get_json(force=True).keys()
):
raise ValidationError(
{"status": "error", "message": "Empty JSON Request"}, 400
)
return func(*args, **kwargs)
return decorated_function
return decorator
def validate_schema(request, schema_instance):
""" A decorator function that validates schema againt request payload """
def decorator(func):
@wraps(func)
def wrapper_function(*args, **kwargs):
json_payload = request.get_json()
schema_instance.load_json_into_schema(json_payload)
return func(*args, **kwargs)
return wrapper_function
return decorator
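# A hedged usage sketch (editor's addition): `app` and `UserSchema` are
# hypothetical and not part of this module; any schema object exposing
# load_json_into_schema() would work here:
#
# from flask import Flask, request, jsonify
#
# app = Flask(__name__)
#
# @app.route('/users', methods=['POST'])
# @validate_json_request(request)
# @validate_schema(request, UserSchema())
# def create_user():
#     return jsonify(status='success'), 201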
| 27.351852 | 77 | 0.605958 |
6a8e267da4ee03eac564d6d766a7430eed1de724 | 12,349 | py | Python | util_pipe.py | shingte/CarND-Advanced-Lane-Lines | 96b73ac0b1bc3496618f0e89184a7e6d5a1e18ee | [
"MIT"
] | 4 | 2020-03-24T02:16:08.000Z | 2021-11-25T17:47:49.000Z | util_pipe.py | shingte/CarND-Advanced-Lane-Lines | 96b73ac0b1bc3496618f0e89184a7e6d5a1e18ee | [
"MIT"
] | null | null | null | util_pipe.py | shingte/CarND-Advanced-Lane-Lines | 96b73ac0b1bc3496618f0e89184a7e6d5a1e18ee | [
"MIT"
] | 4 | 2020-09-18T01:08:25.000Z | 2021-11-25T17:48:47.000Z | import numpy as np
import cv2
def abs_sobel_thresh(img, sobel_kernel=3, orient='x', thresh=(0,255)):
# Apply the following steps to img
# 1) Convert to grayscale
# 2) Take the derivative in x or y given orient = 'x' or 'y'
# 3) Take the absolute value of the derivative or gradient
# 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
# 5) Create a mask of 1's where the scaled gradient magnitude
# is > thresh_min and < thresh_max
# 6) Return this mask as your binary_output image
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return binary_output
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
# Apply the following steps to img
# 1) Convert to grayscale
# 2) Take the gradient in x and y separately
# 3) Calculate the magnitude
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
# 5) Create a binary mask where mag thresholds are met
# 6) Return this mask as your binary_output image
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
mag = np.sqrt(sobelx**2 + sobely**2)
scaled_mag = np.uint8(255*mag/np.max(mag))
binary_output = np.zeros_like(scaled_mag)
binary_output[(scaled_mag >= mag_thresh[0]) & (scaled_mag <= mag_thresh[1])] = 1
return binary_output
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
# Apply the following steps to img
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Calculate the x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Take the absolute value of the gradient direction,
# apply a threshold, and create a binary image result
absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
# Create a binary mask where direction thresholds are me
binary_output = np.zeros_like(absgraddir)
binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
# Return the binary image
return binary_output
def histogram_equalize(img):
r, g, b = cv2.split(img)
red = cv2.equalizeHist(r)
green = cv2.equalizeHist(g)
blue = cv2.equalizeHist(b)
return cv2.merge((red, green, blue))
# Gradient and Color Thresholds
def scale(img, factor=255.0):
scale_factor = np.max(img)/factor
return (img/scale_factor).astype(np.uint8)
def derivative(img, sobel_kernel=3):
derivx = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
derivy = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
gradmag = np.sqrt(derivx**2 + derivy**2)
absgraddir = np.arctan2(derivy, derivx)
return scale(derivx), scale(derivy), scale(gradmag), absgraddir
def grad(img, k1=3, k2=15):
_,_,g,_ = derivative(img, sobel_kernel=k1)
_,_,_,p = derivative(img, sobel_kernel=k2)
return g,p
# lambda function of binary operation
land = lambda *x: np.logical_and.reduce(x)
lor = lambda *x: np.logical_or.reduce(x)
# return image in threshold (min, max)
def threshold(img, thresh=(0,255)):
binary_output = np.zeros_like(img)
binary_output[(img >= thresh[0]) & (img <= thresh[1])] = 1
return binary_output
# Test code -
# color = 'hls'
# c0,c1,c2=color_select(img, color)
# util_cal.plt_n([c0,c1,c2],[color[0],color[1],color[2]])
def color_select(img, color):
color = color.upper()
if color == 'RGB':
img2 = img
else:
img2 = cv2.cvtColor(img, eval('cv2.COLOR_RGB2'+color))
c0 = img2[:,:,0]
c1 = img2[:,:,1]
c2 = img2[:,:,2]
return c0,c1,c2
# highlight effect
def highlight(img):
r,g,b = color_select(img, 'rgb')
h,l,s = color_select(img, 'hls')
h0 = threshold(r, (200, 255))
h1 = threshold(g, (200, 255))
h2 = threshold(s, (200, 255))
return scale(lor(land(h0,h1),h2))
def pipeline_hbs(img, thresh=(200,255)):
h=highlight(img)
c=color_threshold(img, thresh)
#h=threshold(h, thresh)
return scale(lor(h,c))
def rgb_select(img, channel='R', thresh=(200, 255)):
if channel == 'G':
X = img[:, :, 1]
elif channel == 'B':
X = img[:, :, 2]
else: # default to R
X = img[:, :, 0]
binary = np.zeros_like(X)
binary[(X > thresh[0]) & (X <= thresh[1])] = 1
return binary
def lab_select(img, channel='B', thresh=(190, 255)):
lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
if channel == 'L':
X = img[:, :, 0]
elif channel == 'A':
X = img[:, :, 1]
else: # default to B
X = img[:, :, 2]
binary = np.zeros_like(X)
binary[(X > thresh[0]) & (X <= thresh[1])] = 1
return binary
# Define a function that thresholds the channel of HLS
# Use exclusive lower bound (>) and inclusive upper (<=)
def hls_select(img, channel='S', thresh=(220, 255)):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
if channel == 'H':
X = hls[:, :, 0]
elif channel == 'L':
X = hls[:, :, 1]
else: # default to S
X = hls[:, :, 2]
binary = np.zeros_like(X)
binary[(X > thresh[0]) & (X <= thresh[1])] = 1
return binary
def color_mask(hsv,low,high):
# Return mask from HSV
mask = cv2.inRange(hsv, low, high)
return mask
def apply_color_mask(hsv,img,low,high):
# Apply color mask to image
mask = cv2.inRange(hsv, low, high)
res = cv2.bitwise_and(img,img, mask= mask)
return res
def apply_yw_mask(img):
image_HSV = cv2.cvtColor(img,cv2.COLOR_RGB2HSV)
yellow_hsv_low = np.array([ 0, 100, 100])
yellow_hsv_high = np.array([ 80, 255, 255])
white_hsv_low = np.array([ 0, 0, 160])
white_hsv_high = np.array([ 255, 80, 255])
mask_yellow = color_mask(image_HSV,yellow_hsv_low,yellow_hsv_high)
mask_white = color_mask(image_HSV,white_hsv_low,white_hsv_high)
mask_YW_image = cv2.bitwise_or(mask_yellow,mask_white)
return mask_YW_image
def apply_yw_mask2(img):
image_HSV = cv2.cvtColor(img,cv2.COLOR_RGB2HSV)
yellow_hsv_low = np.array([ 50, 50, 50])
yellow_hsv_high = np.array([ 110, 255, 255])
white_hsv_low = np.array([ 200, 200, 200])
white_hsv_high = np.array([ 255, 255, 255])
mask_yellow = color_mask(image_HSV,yellow_hsv_low,yellow_hsv_high)
mask_white = color_mask(image_HSV,white_hsv_low,white_hsv_high)
mask_YW_image = scale(lor(mask_yellow,mask_white))
return mask_YW_image
def color_threshold(img, s_thresh=(0,255), v_thresh=(0,255)):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:,:,2]
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
v_channel = hsv[:,:,2]
v_binary = np.zeros_like(v_channel)
v_binary[(v_channel >= v_thresh[0]) & (v_channel <= v_thresh[1])] = 1
# r_binary = rgb_select(img, channel='R', thresh=r_thresh)
output = np.zeros_like(s_channel)
output[(s_binary == 1) & (v_binary == 1)] = 1
return output
# Return the binary Thresholded image by combining multiple binary thresholds
def combine(img, l_thresh=(215,255), b_thresh=(145,255)):
l_channel = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)[:, :, 0] # Detect white lines
b_channel = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)[:, :, 2] # Detect yellow lines
l_binary = np.zeros_like(l_channel)
l_binary[(l_channel >= l_thresh[0]) & (l_channel <= l_thresh[1])] = 1
b_binary = np.zeros_like(b_channel)
b_binary[(b_channel >= b_thresh[0]) & (b_channel <= b_thresh[1])] = 1
combined_binary = np.zeros_like(b_binary)
combined_binary[(l_binary == 1) | (b_binary == 1)] = 1
return combined_binary, l_binary, b_binary
# pipeline of all binaries
def pipeline_grad(img, x_thresh=(20, 255), r_thresh=(220,255), s_thresh=(100, 255)):
gradx = abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=x_thresh)
r_binary = rgb_select(img, channel='R', thresh=r_thresh)
s_binary = hls_select(img, channel='S', thresh=s_thresh)
l_binary = hls_select(img, channel='L', thresh=s_thresh)
yw_binary = apply_yw_mask(img)
yw_binary[(yw_binary !=0)] = 1
# c_binary,_,_ = combine(img)
combined = np.zeros_like(gradx)
combined[(yw_binary == 1) & (r_binary == 1) | (s_binary == 1) ] = 1
# combined[((l_binary == 1) & (s_binary == 1) | (gradx == 1) | (yw_binary == 1))] = 1
# combined[((l_binary == 1) & (yw_binary == 1) | (gradx == 1) | (s_binary == 1) | (r_binary == 1))] = 1
return combined
def pipeline_rsv(img, r_thresh=(180,255), s_thresh=(100,255), v_thresh=(100,255)):
r_binary = rgb_select(img, channel='R', thresh=r_thresh)
c_binary = color_threshold(img, s_thresh, v_thresh)
output = np.zeros_like(r_binary)
output[(r_binary == 1) | (c_binary == 1)] = 1
return output
def pipeline_lb(img, l_thresh=(220,255), b_thresh=(190,255)):
# HLS L-channel Threshold (using default parameters)
l_binary = hls_select(img, channel='L', thresh=l_thresh)
# Lab B-channel Threshold (using default parameters)
b_binary = lab_select(img, channel='B', thresh=b_thresh)
# Combine HLS and Lab B channel thresholds
combined = np.zeros_like(l_binary)
combined[(l_binary == 1) | (b_binary == 1)] = 1
return combined
def pipeline_rlb(img, r_thresh=(220,255), l_thresh=(215,255), b_thresh=(145,255)):
r_binary = rgb_select(img, channel='R', thresh=r_thresh)
c_binary = combine(img, l_thresh, b_thresh)
output = np.zeros_like(r_binary)
output[(r_binary == 1) | (c_binary == 1)] = 1
return output
def pipeline_edge(img, s_thresh=(150, 255), g_thresh=(130,255)):
gray = (0.5*img[:,:,0] + 0.4*img[:,:,1] + 0.1*img[:,:,2]).astype(np.uint8)
g_binary = np.zeros_like(gray)
g_binary[(gray >= g_thresh[0]) & (gray <= g_thresh[1])] = 1
# switch to gray image for laplacian if 's' doesn't give enough details
total_px = img.shape[0]*img.shape[1]
laplacian = cv2.Laplacian(gray, cv2.CV_32F, ksize=21)
mask_one = (laplacian < 0.15*np.min(laplacian)).astype(np.uint8)
if cv2.countNonZero(mask_one)/total_px < 0.01:
laplacian = cv2.Laplacian(gray, cv2.CV_32F, ksize=21)
mask_one = (laplacian < 0.075*np.min(laplacian)).astype(np.uint8)
s_binary = hls_select(img, channel='S', thresh=s_thresh)
mask_two = s_binary
combined = np.zeros_like(g_binary)
combined[((g_binary == 1) & ((mask_one == 1) | (mask_two == 1)) )] = 1
return combined
# Filter the image, showing only a range of white and yellow
def pipeline_YW(image):
# Filter White
threshold = 200
high_threshold = np.array([255, 255, 255]) #Bright white
low_threshold = np.array([threshold, threshold, threshold]) #Soft White
mask = cv2.inRange(image, low_threshold, high_threshold)
white_img = cv2.bitwise_and(image, image, mask=mask)
# Filter Yellow
hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) #Changing Color-space, HSV is better for object detection
#For HSV, Hue range is [0,179], Saturation range is [0,255] and Value range is [0,255].
high_threshold = np.array([110,255,255]) #Bright Yellow
low_threshold = np.array([50,50,50]) #Soft Yellow
mask = cv2.inRange(hsv_img, low_threshold, high_threshold)
yellow_img = cv2.bitwise_and(image, image, mask=mask)
# Combine the two above images
combined = cv2.addWeighted(white_img, 1., yellow_img, 1., 0.)
gray = cv2.cvtColor(combined, cv2.COLOR_RGB2GRAY)
binary = np.zeros_like(gray)
binary[(gray > 0)] = 1
return binary
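# A minimal runnable sketch (editor's addition) exercising one pipeline on a
# synthetic frame; a real run would load a road image via cv2.imread instead:
if __name__ == '__main__':
    demo = np.zeros((720, 1280, 3), dtype=np.uint8)
    demo[:, 600:680] = (255, 255, 0)      # fake yellow lane stripe
    binary = pipeline_YW(demo)            # 1 where white/yellow paint was found
    print(binary.shape, binary.max())     # -> (720, 1280) 1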
| 39.328025 | 111 | 0.635922 |
3909bf8e7d62ac0dc870331f059905ab1865a537 | 7,938 | py | Python | Measures/letor_metrics.py | alfonsoeromero/S2F | fccb741b15acfdeb02ca0de411eb4b00ae73be85 | [
"MIT"
] | 9 | 2019-10-24T18:46:46.000Z | 2022-03-23T13:21:45.000Z | Measures/letor_metrics.py | alfonsoeromero/S2F | fccb741b15acfdeb02ca0de411eb4b00ae73be85 | [
"MIT"
] | 5 | 2022-01-26T18:00:01.000Z | 2022-02-08T14:09:42.000Z | Measures/letor_metrics.py | alfonsoeromero/S2F | fccb741b15acfdeb02ca0de411eb4b00ae73be85 | [
"MIT"
] | 2 | 2022-01-27T12:52:32.000Z | 2022-01-29T12:08:26.000Z | # (C) Mathieu Blondel, November 2013
# License: BSD 3 clause
import numpy as np
def ranking_precision_score(y_true, y_score, k=10):
"""Precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
precision @k : float
"""
unique_y = np.unique(y_true)
if len(unique_y) > 2:
raise ValueError("Only supported for two relevance levels.")
pos_label = unique_y[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
n_relevant = np.sum(y_true == pos_label)
# Divide by min(n_pos, k) such that the best
# achievable score is always 1.0.
return float(n_relevant) / min(n_pos, k)
def average_precision_score(y_true, y_score, k=10):
"""Average precision at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
Returns
-------
average precision @k : float
"""
unique_y = np.unique(y_true)
if len(unique_y) > 2:
raise ValueError("Only supported for two relevance levels.")
pos_label = unique_y[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1][:min(n_pos, k)]
y_true = np.asarray(y_true)[order]
score = 0
for i in range(len(y_true)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
if n_pos == 0:
return 0
return score / n_pos
def dcg_score(y_true, y_score, k=10, gains="exponential"):
"""Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
DCG @k : float
"""
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
if gains == "exponential":
gains = 2 ** y_true - 1
elif gains == "linear":
gains = y_true
else:
raise ValueError("Invalid gains option.")
# highest rank is 1 so +2 instead of +1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gains / discounts)
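# A tiny worked example (editor's addition) of the exponential-gain formula
# above: with relevances [3, 2] in score order,
# DCG@2 = (2**3 - 1)/log2(2) + (2**2 - 1)/log2(3) = 7.0 + 1.893 ~= 8.893
# >>> round(dcg_score([2, 3], [0.1, 0.9], k=2), 3)
# 8.893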
def ndcg_score(y_true, y_score, k=10, gains="exponential"):
"""Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
Predicted scores.
k : int
Rank.
gains : str
Whether gains should be "exponential" (default) or "linear".
Returns
-------
NDCG @k : float
"""
best = dcg_score(y_true, y_true, k, gains)
actual = dcg_score(y_true, y_score, k, gains)
return actual / best
# Alternative API.
def dcg_from_ranking(y_true, ranking):
"""Discounted cumulative gain (DCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
ranking : array-like, shape = [k]
Document indices, i.e.,
ranking[0] is the index of top-ranked document,
ranking[1] is the index of second-ranked document,
...
Returns
-------
DCG @k : float
"""
y_true = np.asarray(y_true)
ranking = np.asarray(ranking)
rel = y_true[ranking]
gains = 2 ** rel - 1
discounts = np.log2(np.arange(len(ranking)) + 2)
return np.sum(gains / discounts)
def ndcg_from_ranking(y_true, ranking):
"""Normalized discounted cumulative gain (NDCG) at rank k
Parameters
----------
y_true : array-like, shape = [n_samples]
Ground truth (true relevance labels).
ranking : array-like, shape = [k]
Document indices, i.e.,
ranking[0] is the index of top-ranked document,
ranking[1] is the index of second-ranked document,
...
Returns
-------
NDCG @k : float
"""
k = len(ranking)
best_ranking = np.argsort(y_true)[::-1]
best = dcg_from_ranking(y_true, best_ranking[:k])
return dcg_from_ranking(y_true, ranking) / best
if __name__ == '__main__':
# Check that some rankings are better than others
assert dcg_score([5, 3, 2], [2, 1, 0]) > dcg_score([4, 3, 2], [2, 1, 0])
assert dcg_score([4, 3, 2], [2, 1, 0]) > dcg_score([1, 3, 2], [2, 1, 0])
assert dcg_score([5, 3, 2], [2, 1, 0], k=2) > dcg_score([4, 3, 2],
[2, 1, 0], k=2)
assert dcg_score([4, 3, 2], [2, 1, 0], k=2) > dcg_score([1, 3, 2],
[2, 1, 0], k=2)
# Perfect rankings
assert ndcg_score([5, 3, 2], [2, 1, 0]) == 1.0
assert ndcg_score([2, 3, 5], [0, 1, 2]) == 1.0
assert ndcg_from_ranking([5, 3, 2], [0, 1, 2]) == 1.0
assert ndcg_score([5, 3, 2], [2, 1, 0], k=2) == 1.0
assert ndcg_score([2, 3, 5], [0, 1, 2], k=2) == 1.0
assert ndcg_from_ranking([5, 3, 2], [0, 1]) == 1.0
# Check that sample order is irrelevant
assert dcg_score([5, 3, 2], [2, 1, 0]) == dcg_score([2, 3, 5], [0, 1, 2])
assert dcg_score([5, 3, 2], [2, 1, 0], k=2) == dcg_score([2, 3, 5],
[0, 1, 2], k=2)
# Check equivalence between two interfaces.
assert dcg_score([5, 3, 2], [2, 1, 0]) == dcg_from_ranking([5, 3, 2],
[0, 1, 2])
assert dcg_score([1, 3, 2], [2, 1, 0]) == dcg_from_ranking([1, 3, 2],
[0, 1, 2])
assert dcg_score([1, 3, 2], [0, 2, 1]) == dcg_from_ranking([1, 3, 2],
[1, 2, 0])
assert ndcg_score([1, 3, 2], [2, 1, 0]) == ndcg_from_ranking([1, 3, 2],
[0, 1, 2])
assert dcg_score([5, 3, 2], [2, 1, 0], k=2) == dcg_from_ranking([5, 3, 2],
[0, 1])
assert dcg_score([1, 3, 2], [2, 1, 0], k=2) == dcg_from_ranking([1, 3, 2],
[0, 1])
assert dcg_score([1, 3, 2], [0, 2, 1], k=2) == dcg_from_ranking([1, 3, 2],
[1, 2])
assert ndcg_score([1, 3, 2], [2, 1, 0], k=2) == \
ndcg_from_ranking([1, 3, 2], [0, 1])
# Precision
assert ranking_precision_score([1, 1, 0], [3, 2, 1], k=2) == 1.0
assert ranking_precision_score([1, 1, 0], [1, 0, 0.5], k=2) == 0.5
assert ranking_precision_score([1, 1, 0], [3, 2, 1], k=3) == \
ranking_precision_score([1, 1, 0], [1, 0, 0.5], k=3)
# Average precision
from sklearn.metrics import average_precision_score as ap
assert average_precision_score([1, 1, 0], [3, 2, 1]) == ap([1, 1, 0],
[3, 2, 1])
assert average_precision_score([1, 1, 0], [3, 1, 0]) == ap([1, 1, 0],
[3, 1, 0])
| 29.842105 | 78 | 0.508188 |
d7226515f2ffabf9ae7d279e44e0341a818d6ec5 | 2,275 | py | Python | _1327/urls.py | Pottiman/1327 | 1d67cf905c801d998ff1f10e5312c51d76dd44ef | [
"MIT"
] | null | null | null | _1327/urls.py | Pottiman/1327 | 1d67cf905c801d998ff1f10e5312c51d76dd44ef | [
"MIT"
] | null | null | null | _1327/urls.py | Pottiman/1327 | 1d67cf905c801d998ff1f10e5312c51d76dd44ef | [
"MIT"
] | null | null | null | from django.conf import settings
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import include, path, register_converter
from _1327.documents import urls as document_urls
from _1327.main import views as main_views
from _1327.main.utils import SlugWithSlashConverter
from _1327.shortlinks import views as shortlinks_views
from _1327.user_management import views as user_management_views
from _1327.user_management.forms import LoginUsernameForm
urlpatterns = [
path("", main_views.index, name="index"),
path("" + settings.MINUTES_URL_NAME + "/", include("_1327.minutes.urls")),
path("" + settings.POLLS_URL_NAME + "/", include("_1327.polls.urls")),
path("documents/", include("_1327.documents.urls")),
path("information_pages/", include("_1327.information_pages.urls")),
path("login", auth_views.LoginView.as_view(template_name="login.html", authentication_form=LoginUsernameForm), name="login"),
path("logout", user_management_views.logout, name="logout"),
path("view_as", user_management_views.view_as, name="view_as"),
path("abbreviation_explanation", main_views.abbreviation_explanation_edit, name="abbreviation_explanation"),
path("menu_items", main_views.menu_items_index, name="menu_items_index"),
path("menu_item/create", main_views.menu_item_create, name="menu_item_create"),
path("menu_item/<int:menu_item_pk>/edit", main_views.menu_item_edit, name="menu_item_edit"),
path("menu_item_delete", main_views.menu_item_delete, name="menu_item_delete"),
path("menu_item/update_order", main_views.menu_items_update_order, name="menu_items_update_order"),
path("shortlinks", shortlinks_views.shortlinks_index, name="shortlinks_index"),
path("shortlink/create", shortlinks_views.shortlink_create, name="shortlink_create"),
path("shortlink/delete", shortlinks_views.shortlink_delete, name="shortlink_delete"),
path("admin/", admin.site.urls),
path("hijack/", include("hijack.urls")),
]
urlpatterns.extend(document_urls.document_urlpatterns)
register_converter(SlugWithSlashConverter, 'slugwithslash')
custom_urlpatterns = [
path("<slugwithslash:title>/edit", main_views.edit, name="edit"),
path("<slugwithslash:title>", main_views.view, name="view"),
]
urlpatterns.extend(custom_urlpatterns)
| 48.404255 | 126 | 0.795604 |
480b33315fdc318fd9c05932d2f30a697ac17d58 | 3,344 | py | Python | paddlespeech/t2s/exps/tacotron2/preprocess.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | [
"Apache-2.0"
] | 1,540 | 2017-11-14T13:26:33.000Z | 2021-11-09T14:05:08.000Z | paddlespeech/t2s/exps/tacotron2/preprocess.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | [
"Apache-2.0"
] | 599 | 2017-11-14T13:19:12.000Z | 2021-11-09T01:58:26.000Z | paddlespeech/t2s/exps/tacotron2/preprocess.py | JiehangXie/PaddleSpeech | 60090b49ec27437127ab62358026dd5bb95fccc7 | [
"Apache-2.0"
] | 449 | 2017-11-14T12:48:46.000Z | 2021-11-06T09:34:33.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import pickle
from pathlib import Path
import numpy as np
import tqdm
from paddlespeech.t2s.audio import AudioProcessor
from paddlespeech.t2s.audio import LogMagnitude
from paddlespeech.t2s.datasets import LJSpeechMetaData
from paddlespeech.t2s.exps.tacotron2.config import get_cfg_defaults
from paddlespeech.t2s.frontend import EnglishCharacter
def create_dataset(config, source_path, target_path, verbose=False):
# create output dir
target_path = Path(target_path).expanduser()
mel_path = target_path / "mel"
os.makedirs(mel_path, exist_ok=True)
meta_data = LJSpeechMetaData(source_path)
frontend = EnglishCharacter()
processor = AudioProcessor(
sample_rate=config.data.sample_rate,
n_fft=config.data.n_fft,
n_mels=config.data.n_mels,
win_length=config.data.win_length,
hop_length=config.data.hop_length,
fmax=config.data.fmax,
fmin=config.data.fmin)
normalizer = LogMagnitude()
records = []
for (fname, text, _) in tqdm.tqdm(meta_data):
wav = processor.read_wav(fname)
mel = processor.mel_spectrogram(wav)
mel = normalizer.transform(mel)
ids = frontend(text)
mel_name = os.path.splitext(os.path.basename(fname))[0]
# save mel spectrogram
records.append((mel_name, text, ids))
np.save(mel_path / mel_name, mel)
if verbose:
print("save mel spectrograms into {}".format(mel_path))
# save meta data as pickle archive
with open(target_path / "metadata.pkl", 'wb') as f:
pickle.dump(records, f)
if verbose:
print("saved metadata into {}".format(target_path / "metadata.pkl"))
print("Done.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="create dataset")
parser.add_argument(
"--config",
type=str,
metavar="FILE",
help="extra config to overwrite the default config")
parser.add_argument(
"--input", type=str, help="path of the ljspeech dataset")
parser.add_argument(
"--output", type=str, help="path to save output dataset")
parser.add_argument(
"--opts",
nargs=argparse.REMAINDER,
help="options to overwrite --config file and the default config, passing in KEY VALUE pairs"
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="print msg")
config = get_cfg_defaults()
args = parser.parse_args()
if args.config:
config.merge_from_file(args.config)
if args.opts:
config.merge_from_list(args.opts)
config.freeze()
print(config.data)
create_dataset(config, args.input, args.output, args.verbose)
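# Example invocation (editor's addition); the paths are placeholders, while the
# flags match the argparse definitions above:
#
#   python preprocess.py --config conf/default.yaml \
#       --input ~/datasets/LJSpeech-1.1 --output data/ljspeech -v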
| 33.777778 | 100 | 0.691687 |
46e3ff53fb1b941d4c468448c6fb18d3fa28aa71 | 1,249 | py | Python | valid8/tests/validation_lib/test_validators_comparables.py | smarie/python-validate | c8a10ccede1c0782355439b0966f532bf00dfcab | [
"BSD-3-Clause"
] | 26 | 2018-01-10T03:44:19.000Z | 2021-11-28T07:56:31.000Z | valid8/tests/validation_lib/test_validators_comparables.py | smarie/python-validate | c8a10ccede1c0782355439b0966f532bf00dfcab | [
"BSD-3-Clause"
] | 55 | 2017-11-06T14:45:47.000Z | 2021-05-12T08:28:11.000Z | valid8/tests/validation_lib/test_validators_comparables.py | smarie/python-valid8 | c8a10ccede1c0782355439b0966f532bf00dfcab | [
"BSD-3-Clause"
] | null | null | null | import pytest
from valid8.validation_lib import gt, gts, lt, lts, between, NotInRange, TooSmall, TooBig
def test_gt():
""" tests that the gt() function works """
assert gt(1)(1)
with pytest.raises(TooSmall):
gt(-1)(-1.1)
def test_gts():
""" tests that the gts() function works """
with pytest.raises(TooSmall):
gts(1)(1)
assert gts(-1)(-0.9)
def test_lt():
""" tests that the lt() function works """
assert lt(1)(1)
with pytest.raises(TooBig):
lt(-1)(-0.9)
def test_lts():
""" tests that the lts() function works """
with pytest.raises(TooBig):
lts(1)(1)
assert lts(-1)(-1.1)
def test_between():
""" tests that the between() function works """
assert between(0, 1)(0)
assert between(0, 1)(1)
with pytest.raises(NotInRange):
between(0, 1)(-0.1)
with pytest.raises(NotInRange):
between(0, 1)(1.1)
def test_numpy_nan():
""" Test that a numpy nan is correctly handled """
import numpy as np
    with pytest.raises(TooSmall):
        gt(5.1)(np.nan)
    with pytest.raises(TooBig):
        lt(5.1)(np.nan)
    with pytest.raises(NotInRange):
        between(5.1, 5.2)(np.nan)
| 21.534483 | 89 | 0.598078 |
5cddb2bbba54b1a8a20b582535a7da437cded1b3 | 31,442 | py | Python | zarc/old/models_2017-05-20-17:06:44.py | nyimbi/caseke | ce4a0fa44cd383bc23900e42f81656f089c8fdd9 | [
"MIT"
] | 1 | 2019-06-03T16:20:35.000Z | 2019-06-03T16:20:35.000Z | zarc/old/models_2017-05-20-17:06:44.py | nyimbi/caseke | ce4a0fa44cd383bc23900e42f81656f089c8fdd9 | [
"MIT"
] | 20 | 2020-01-28T22:02:29.000Z | 2022-03-29T22:28:34.000Z | zarc/old/models_2017-05-20-17:06:44.py | nyimbi/caseke | ce4a0fa44cd383bc23900e42f81656f089c8fdd9 | [
"MIT"
] | 1 | 2019-06-10T17:20:48.000Z | 2019-06-10T17:20:48.000Z | # coding: utf-8
# AUTOGENERATED BY gen_script.sh from kpony3.py
# Copyright (C) Nyimbi Odero, Sat May 20 16:40:17 EAT 2017
from sqlalchemy import func
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin, FileColumn, ImageColumn, UserExtensionMixin
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.filemanager import ImageManager
from sqlalchemy_utils import aggregated
from sqlalchemy.orm import relationship, query, defer, deferred
from sqlalchemy import (Column, Integer, String, ForeignKey,
Sequence, Float, Text, BigInteger, Date,
DateTime, Time, Boolean, CheckConstraint,
UniqueConstraint, LargeBinary , Table)
from datetime import timedelta, datetime, date
from sqlalchemy.dialects.postgresql import *
from .mixins import *
from flask import Markup, url_for  # used by the photo_img helper methods below
# Here is how to extend the User model
#class UserExtended(Model, UserExtensionMixin):
# contact_group_id = Column(Integer, ForeignKey('contact_group.id'), nullable=True)
# contact_group = relationship('ContactGroup')
class Attorney(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'attorney'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
id = Column(Integer, primary_key=True, autoincrement=True)
law_firm = Column(ForeignKey(u'lawfirm.id'), index=True)
barnumber = Column(String(20), nullable=False)
lawfirm = relationship(u'Lawfirm', primaryjoin='Attorney.law_firm == Lawfirm.id', backref=u'attorneys')
hearing = relationship(u'Hearing', secondary='attorney_hearing', backref=u'attorneys')
attorney_hearing = Table(
'attorney_hearing', Model.metadata,
Column('attorney', ForeignKey(u'attorney.id'), primary_key=True, nullable=False),
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False, index=True)
)
class Bail(AuditMixin, Model):
__tablename__ = 'bail'
id = Column(Integer, primary_key=True, autoincrement=True)
hearing = Column(ForeignKey(u'hearing.id'), nullable=False, index=True)
defendant = Column(ForeignKey(u'defendant.id'), nullable=False, index=True)
amountgranted = Column(Float(53), nullable=False)
noofsureties = Column(Integer, nullable=False)
paid = Column(Boolean, nullable=False)
paydate = Column(Date, nullable=False)
receiptno = Column(String(100), nullable=False)
defendant1 = relationship(u'Defendant', primaryjoin='Bail.defendant == Defendant.id', backref=u'bails')
hearing1 = relationship(u'Hearing', primaryjoin='Bail.hearing == Hearing.id', backref=u'bails')
surety = relationship(u'Surety', secondary='bail_surety', backref=u'bails')
bail_surety = Table(
'bail_surety', Model.metadata,
Column('bail', ForeignKey(u'bail.id'), primary_key=True, nullable=False),
Column('surety', ForeignKey(u'surety.id'), primary_key=True, nullable=False, index=True)
)
class Case(AuditMixin, Model):
__tablename__ = 'case'
id = Column(Integer, primary_key=True, autoincrement=True)
casename = Column(String(200), nullable=False)
investigationassigmentdate = Column(DateTime)
investigationassignmentnote = Column(Text, nullable=False)
investigationplan = Column(Text, nullable=False)
initialreport = Column(Text, nullable=False)
priority = Column(Integer, nullable=False)
investigationsummary = Column(Text, nullable=False)
agadvicerequested = Column(Boolean, nullable=False)
sendtotrial = Column(Boolean, nullable=False)
chargedate = Column(DateTime)
agadvice = Column(Text, nullable=False)
taketotrial = Column(Boolean, nullable=False)
caseclosed = Column(Boolean, nullable=False)
judgement = Column(Text, nullable=False)
closeddate = Column(Date, nullable=False)
sentencelength = Column(Integer, nullable=False)
sentencestartdate = Column(Date, nullable=False)
sentenceexpirydate = Column(Date, nullable=False)
fineamount = Column(Float(53), nullable=False)
caseappealed = Column(Boolean, nullable=False)
appealdate = Column(DateTime, nullable=False)
natureofsuit = relationship(u'Natureofsuit', secondary='case_natureofsuit', backref=u'cases')
plaintiff = relationship(u'Plaintiff', secondary='case_plaintiff', backref=u'cases')
policeman = relationship(u'Policeman', secondary='case_policeman_2', backref=u'policeman_cases')
prosecutor = relationship(u'Prosecutor', secondary='case_prosecutor', backref=u'cases')
policeman1 = relationship(u'Policeman', secondary='case_policeman', backref=u'policeman_cases_0')
policestation = relationship(u'Policestation', secondary='case_policestation', backref=u'cases')
observer = relationship(u'Observer', secondary='case_observer', backref=u'cases')
defendant = relationship(u'Defendant', secondary='case_defendant', backref=u'cases')
causeofaction = relationship(u'Causeofaction', secondary='case_causeofaction', backref=u'cases')
case_causeofaction = Table(
'case_causeofaction', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('causeofaction', ForeignKey(u'causeofaction.id'), primary_key=True, nullable=False, index=True)
)
case_defendant = Table(
'case_defendant', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('defendant', ForeignKey(u'defendant.id'), primary_key=True, nullable=False, index=True)
)
case_natureofsuit = Table(
'case_natureofsuit', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('natureofsuit', ForeignKey(u'natureofsuit.id'), primary_key=True, nullable=False, index=True)
)
case_observer = Table(
'case_observer', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('observer', ForeignKey(u'observer.id'), primary_key=True, nullable=False, index=True)
)
case_plaintiff = Table(
'case_plaintiff', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('plaintiff', ForeignKey(u'plaintiff.id'), primary_key=True, nullable=False, index=True)
)
case_policeman = Table(
'case_policeman', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('policeman', ForeignKey(u'policeman.id'), primary_key=True, nullable=False, index=True)
)
case_policeman_2 = Table(
'case_policeman_2', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('policeman', ForeignKey(u'policeman.id'), primary_key=True, nullable=False, index=True)
)
case_policestation = Table(
'case_policestation', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('policestation', ForeignKey(u'policestation.id'), primary_key=True, nullable=False, index=True)
)
case_prosecutor = Table(
'case_prosecutor', Model.metadata,
Column('case', ForeignKey(u'case.id'), primary_key=True, nullable=False),
Column('prosecutor', ForeignKey(u'prosecutor.id'), primary_key=True, nullable=False, index=True)
)
class Causeofaction(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'causeofaction'
id = Column(Integer, primary_key=True, autoincrement=True)
criminal = Column(Boolean, nullable=False)
parent_coa = Column(ForeignKey(u'causeofaction.id'), index=True)
parent = relationship(u'Causeofaction', remote_side=[id], primaryjoin='Causeofaction.parent_coa == Causeofaction.id', backref=u'causeofactions')
filing = relationship(u'Filing', secondary='causeofaction_filing', backref=u'causeofactions')
hearing = relationship(u'Hearing', secondary='causeofaction_hearing', backref=u'causeofactions')
causeofaction_filing = Table(
'causeofaction_filing', Model.metadata,
Column('causeofaction', ForeignKey(u'causeofaction.id'), primary_key=True, nullable=False),
Column('filing', ForeignKey(u'filing.id'), primary_key=True, nullable=False, index=True)
)
causeofaction_hearing = Table(
'causeofaction_hearing', Model.metadata,
Column('causeofaction', ForeignKey(u'causeofaction.id'), primary_key=True, nullable=False),
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False, index=True)
)
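# A hedged usage sketch (editor's addition) of the self-referential parent
# relationship above; `session` is an assumed SQLAlchemy session and the
# mixin-provided name fields are omitted for brevity:
#
# parent = Causeofaction(criminal=True)
# child = Causeofaction(criminal=True, parent=parent)  # backref sets parent_coa
# session.add_all([parent, child])
# session.commit()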
class Constituency(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'constituency'
id = Column(Integer, primary_key=True, autoincrement=True)
county = Column(ForeignKey(u'county.id'), nullable=False, index=True)
town = Column(ForeignKey(u'town.id'), index=True)
county1 = relationship(u'County', primaryjoin='Constituency.county == County.id', backref=u'constituencies')
town1 = relationship(u'Town', primaryjoin='Constituency.town == Town.id', backref=u'constituencies')
class County(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'county'
id = Column(Integer, primary_key=True, autoincrement=True)
class Court(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'court'
id = Column(Integer, primary_key=True, autoincrement=True)
town = Column(ForeignKey(u'town.id'), nullable=False, index=True)
residentmagistrate = Column(String(100))
registrar = Column(String(100), nullable=False)
court_level = Column(ForeignKey(u'courtlevel.id'), nullable=False, index=True)
courtlevel = relationship(u'Courtlevel', primaryjoin='Court.court_level == Courtlevel.id', backref=u'courts')
town1 = relationship(u'Town', primaryjoin='Court.town == Town.id', backref=u'courts')
class Courtlevel(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'courtlevel'
id = Column(Integer, primary_key=True, autoincrement=True)
class Defendant(BiometricMixin, EmploymentMixin, PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'defendant'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
id = Column(Integer, primary_key=True, autoincrement=True)
hearing = relationship(u'Hearing', secondary='defendant_hearing', backref=u'defendants')
defendant_hearing = Table(
'defendant_hearing', Model.metadata,
Column('defendant', ForeignKey(u'defendant.id'), primary_key=True, nullable=False),
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False, index=True)
)
class Doctemplate(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'doctemplate'
id = Column(Integer, primary_key=True, autoincrement=True)
template = Column(Text, nullable=False)
templatejson = Column(JSON, nullable=False)
filing = relationship(u'Filing', secondary='doctemplate_filing', backref=u'doctemplates')
doctemplate_filing = Table(
'doctemplate_filing', Model.metadata,
Column('doctemplate', ForeignKey(u'doctemplate.id'), primary_key=True, nullable=False),
Column('filing', ForeignKey(u'filing.id'), primary_key=True, nullable=False, index=True)
)
class Filing(AuditMixin, Model):
__tablename__ = 'filing'
id = Column(Integer, primary_key=True, autoincrement=True)
filedate = Column(DateTime, nullable=False)
totalfees = Column(Float(53), nullable=False)
filing_attorney = Column(ForeignKey(u'attorney.id'), nullable=False, index=True)
filing_prosecutor = Column(ForeignKey(u'prosecutor.id'), nullable=False, index=True)
receiptnumber = Column(Text)
receiptverified = Column(Boolean, nullable=False)
amountpaid = Column(Float(53), nullable=False)
feebalance = Column(Float(53), nullable=False)
paymenthistory = Column(Text, nullable=False)
doctype = Column(String(100), nullable=False)
doc = Column(Text, nullable=False)
docbin = Column(Text, nullable=False)
docthumbnail = Column(ImageColumn, nullable=False)
docjson = Column(JSON, nullable=False)
pagecount = Column(Integer, nullable=False)
binhash = Column(String(100), nullable=False)
texthash = Column(String(100), nullable=False)
attorney = relationship(u'Attorney', primaryjoin='Filing.filing_attorney == Attorney.id', backref=u'filings')
prosecutor = relationship(u'Prosecutor', primaryjoin='Filing.filing_prosecutor == Prosecutor.id', backref=u'filings')
filingtype = relationship(u'Filingtype', secondary='filing_filingtype', backref=u'filings')
hearing = relationship(u'Hearing', secondary='filing_hearing', backref=u'filings')
filing_filingtype = Table(
'filing_filingtype', Model.metadata,
Column('filing', ForeignKey(u'filing.id'), primary_key=True, nullable=False),
Column('filingtype', ForeignKey(u'filingtype.id'), primary_key=True, nullable=False, index=True)
)
filing_hearing = Table(
'filing_hearing', Model.metadata,
Column('filing', ForeignKey(u'filing.id'), primary_key=True, nullable=False),
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False, index=True)
)
class Filingtype(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'filingtype'
id = Column(Integer, primary_key=True, autoincrement=True)
cost = Column(Float(53), nullable=False)
perpagecost = Column(Float(53), nullable=False)
class Hearing(ActivityMixin, AuditMixin, Model):
__tablename__ = 'hearing'
id = Column(Integer, primary_key=True, autoincrement=True)
hearingdate = Column(DateTime, nullable=False)
adjourned = Column(Boolean, nullable=False)
case = Column(ForeignKey(u'case.id'), nullable=False, index=True)
court = Column(ForeignKey(u'court.id'), nullable=False, index=True)
hearing_type = Column(ForeignKey(u'hearingtype.id'), nullable=False, index=True)
remandwarrant = Column(Text, nullable=False)
remandlength = Column(Integer)
remanddate = Column(Date, nullable=False)
remandwarrantexpirydate = Column(Date, nullable=False)
nexthearingdate = Column(Date)
finalhearing = Column(Boolean, nullable=False)
transcript = Column(Text, nullable=False)
audio = Column(ImageColumn, nullable=False)
video = Column(ImageColumn, nullable=False)
case1 = relationship(u'Case', primaryjoin='Hearing.case == Case.id', backref=u'hearings')
court1 = relationship(u'Court', primaryjoin='Hearing.court == Court.id', backref=u'hearings')
hearingtype = relationship(u'Hearingtype', primaryjoin='Hearing.hearing_type == Hearingtype.id', backref=u'hearings')
prosecutor = relationship(u'Prosecutor', secondary='hearing_prosecutor', backref=u'hearings')
judge = relationship(u'Judge', secondary='hearing_judge', backref=u'hearings')
policeman = relationship(u'Policeman', secondary='hearing_policeman', backref=u'hearings')
observer = relationship(u'Observer', secondary='hearing_observer', backref=u'hearings')
hearing_judge = Table(
'hearing_judge', Model.metadata,
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False),
Column('judge', ForeignKey(u'judge.id'), primary_key=True, nullable=False, index=True)
)
hearing_observer = Table(
'hearing_observer', Model.metadata,
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False),
Column('observer', ForeignKey(u'observer.id'), primary_key=True, nullable=False, index=True)
)
hearing_policeman = Table(
'hearing_policeman', Model.metadata,
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False),
Column('policeman', ForeignKey(u'policeman.id'), primary_key=True, nullable=False, index=True)
)
hearing_prosecutor = Table(
'hearing_prosecutor', Model.metadata,
Column('hearing', ForeignKey(u'hearing.id'), primary_key=True, nullable=False),
Column('prosecutor', ForeignKey(u'prosecutor.id'), primary_key=True, nullable=False, index=True)
)
class Hearingtype(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'hearingtype'
id = Column(Integer, primary_key=True, autoincrement=True)
class Investigation(AuditMixin, Model):
__tablename__ = 'investigation'
id = Column(Integer, primary_key=True, autoincrement=True)
case = Column(ForeignKey(u'case.id'), nullable=False, index=True)
actiondate = Column(DateTime, nullable=False)
evidence = Column(Text, nullable=False)
narrative = Column(Text, nullable=False)
weather = Column(Text, nullable=False)
location = Column(Text, nullable=False)
case1 = relationship(u'Case', primaryjoin='Investigation.case == Case.id', backref=u'investigations')
observer = relationship(u'Observer', secondary='investigation_observer', backref=u'investigations')
policeman = relationship(u'Policeman', secondary='investigation_policeman', backref=u'investigations')
investigation_observer = Table(
'investigation_observer', Model.metadata,
Column('investigation', ForeignKey(u'investigation.id'), primary_key=True, nullable=False),
Column('observer', ForeignKey(u'observer.id'), primary_key=True, nullable=False, index=True)
)
investigation_policeman = Table(
'investigation_policeman', Model.metadata,
Column('investigation', ForeignKey(u'investigation.id'), primary_key=True, nullable=False),
Column('policeman', ForeignKey(u'policeman.id'), primary_key=True, nullable=False, index=True)
)
class Judge(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'judge'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
id = Column(Integer, primary_key=True, autoincrement=True)
court = Column(ForeignKey(u'court.id'), nullable=False, index=True)
court1 = relationship(u'Court', primaryjoin='Judge.court == Court.id', backref=u'judges')
class Lawfirm(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'lawfirm'
id = Column(Integer, primary_key=True, autoincrement=True)
class Natureofsuit(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'natureofsuit'
id = Column(Integer, primary_key=True, autoincrement=True)
class Observer(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'observer'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
id = Column(Integer, primary_key=True, autoincrement=True)
fordefense = Column(Boolean, nullable=False)
class Plaintiff(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'plaintiff'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
id = Column(Integer, primary_key=True, autoincrement=True)
class Policeman(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'policeman'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
id = Column(Integer, primary_key=True, autoincrement=True)
policerole = relationship(u'Policerole', secondary='policerole_policeman', backref=u'policemen')
class Policerole(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'policerole'
id = Column(Integer, primary_key=True, autoincrement=True)
policerole_policeman = Table(
'policerole_policeman', Model.metadata,
Column('policerole', ForeignKey(u'policerole.id'), primary_key=True, nullable=False),
Column('policeman', ForeignKey(u'policeman.id'), primary_key=True, nullable=False, index=True)
)
class Policestation(AuditMixin, Model):
__tablename__ = 'policestation'
id = Column(Integer, primary_key=True, autoincrement=True)
town = Column(ForeignKey(u'town.id'), nullable=False, index=True)
officercommanding = Column(String(100))
town1 = relationship(u'Town', primaryjoin='Policestation.town == Town.id', backref=u'policestations')
class Prison(AuditMixin, Model):
__tablename__ = 'prison'
id = Column(Integer, primary_key=True, autoincrement=True)
town = Column(ForeignKey(u'town.id'), nullable=False, index=True)
warden = Column(String(100))
capacity = Column(Integer, nullable=False)
population = Column(Integer, nullable=False)
town1 = relationship(u'Town', primaryjoin='Prison.town == Town.id', backref=u'prisons')
class Prisonremand(AuditMixin, Model):
__tablename__ = 'prisonremand'
prison = Column(ForeignKey(u'prison.id'), primary_key=True, nullable=False)
warrantno = Column(String(100), primary_key=True, nullable=False)
hearing = Column(ForeignKey(u'hearing.id'), nullable=False, index=True)
defendant = Column(ForeignKey(u'defendant.id'), nullable=False, index=True)
warrantduration = Column(Integer, nullable=False)
warrantdate = Column(DateTime, nullable=False)
warrant = Column(Text, nullable=False)
warrantexpiry = Column(DateTime, nullable=False)
history = Column(Text, nullable=False)
defendant1 = relationship(u'Defendant', primaryjoin='Prisonremand.defendant == Defendant.id', backref=u'prisonremands')
hearing1 = relationship(u'Hearing', primaryjoin='Prisonremand.hearing == Hearing.id', backref=u'prisonremands')
prison1 = relationship(u'Prison', primaryjoin='Prisonremand.prison == Prison.id', backref=u'prisonremands')
class Prosecutor(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'prosecutor'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
id = Column(Integer, primary_key=True, autoincrement=True)
prosecutorteam = relationship(u'Prosecutorteam', secondary='prosecutor_prosecutorteam', backref=u'prosecutors')
prosecutor_prosecutorteam = Table(
'prosecutor_prosecutorteam', Model.metadata,
Column('prosecutor', ForeignKey(u'prosecutor.id'), primary_key=True, nullable=False),
Column('prosecutorteam', ForeignKey(u'prosecutorteam.id'), primary_key=True, nullable=False, index=True)
)
class Prosecutorteam(AuditMixin, Model):
__tablename__ = 'prosecutorteam'
id = Column(Integer, primary_key=True, autoincrement=True)
class Subcounty(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'subcounty'
id = Column(Integer, primary_key=True, autoincrement=True)
county = Column(ForeignKey(u'county.id'), nullable=False, index=True)
county1 = relationship(u'County', primaryjoin='Subcounty.county == County.id', backref=u'subcounties')
class Surety(PersonMixin, ContactMixin, AuditMixin, Model):
__tablename__ = 'surety'
def photo_img(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
def photo_img_thumbnail(self):
im = ImageManager()
vn = self.ViewName()
if self.photo:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="' + im.get_url_thumbnail(self.photo) +
'" alt="Photo" class="img-rounded img-responsive"></a>')
else:
return Markup('<a href="' + url_for(vn, pk=str(self.id)) +
'" class="thumbnail"><img src="//:0" alt="Photo" class="img-responsive"></a>')
id = Column(Integer, primary_key=True, autoincrement=True)
class Town(RefTypeMixin, AuditMixin, Model):
__tablename__ = 'town'
id = Column(Integer, primary_key=True, autoincrement=True)
subcounty = Column(ForeignKey(u'subcounty.id'), nullable=False, index=True)
subcounty1 = relationship(u'Subcounty', primaryjoin='Town.subcounty == Subcounty.id', backref=u'towns')
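# A hedged, illustrative sketch (not part of the generated schema): how the
# association tables above are consumed. SQLAlchemy resolves the
# `defendant_hearing` join behind the `Defendant.hearing` relationship, so no
# explicit join is needed; the session handling here is an assumption.
def _example_hearings_for_defendant(session, defendant_id):
    """Return all hearings linked to one defendant through the M2M table."""
    defendant = session.query(Defendant).get(defendant_id)
    return list(defendant.hearing) if defendant else []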
| 42.662144 | 148 | 0.652376 |
e14e64c8fc77f5245f8aa94960f6c346518173ec | 1,401 | py | Python | tests/__init__.py | NickKaramoff/double-stream-handler | 27d658b17b15529d5dcdeba51bbc1df7e73a2e9c | [
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | NickKaramoff/double-stream-handler | 27d658b17b15529d5dcdeba51bbc1df7e73a2e9c | [
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | NickKaramoff/double-stream-handler | 27d658b17b15529d5dcdeba51bbc1df7e73a2e9c | [
"BSD-3-Clause"
] | null | null | null | import logging
from io import StringIO
from unittest import TestCase, main
from unittest.mock import patch
class DoubleStreamHandlerTestCase(TestCase):
@patch("sys.stderr", new_callable=StringIO)
@patch("sys.stdout", new_callable=StringIO)
def test(self, mock_stdout, mock_stderr):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(message)s")
custom_out = StringIO()
custom_err = StringIO()
from double_stream_handler import DoubleStreamHandler
handler_default = DoubleStreamHandler()
handler_default.setFormatter(formatter)
handler_custom = DoubleStreamHandler(
err_level=logging.CRITICAL, streams=(custom_out, custom_err)
)
handler_custom.setFormatter(formatter)
logger.addHandler(handler_default)
logger.addHandler(handler_custom)
logger.debug("Debug")
logger.info("Info")
logger.warning("Warning")
logger.error("Error")
logger.critical("Critical")
self.assertEqual(mock_stdout.getvalue(), "Debug\nInfo\n")
self.assertEqual(mock_stderr.getvalue(), "Warning\nError\nCritical\n")
self.assertEqual(custom_out.getvalue(), "Debug\nInfo\nWarning\nError\n")
self.assertEqual(custom_err.getvalue(), "Critical\n")
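# A hedged usage sketch (assumed application setup, not exercised by the test):
# with the default handler, DEBUG/INFO go to stdout and WARNING and above go to
# stderr, mirroring the split asserted above.
def _example_setup():
    from double_stream_handler import DoubleStreamHandler
    logger = logging.getLogger("app")
    logger.setLevel(logging.DEBUG)
    handler = DoubleStreamHandler()  # defaults to sys.stdout / sys.stderr
    handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    logger.addHandler(handler)
    return logger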
if __name__ == "__main__":
main()
| 31.133333 | 80 | 0.685225 |
a08c33f20b56414b0833e00a340f48eeeea7d4b2 | 5,675 | py | Python | pypureclient/flashblade/FB_2_3/models/session.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flashblade/FB_2_3/models/session.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flashblade/FB_2_3/models/session.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_3 import models
class Session(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'id': 'str',
'end_time': 'int',
'event': 'str',
'event_count': 'int',
'location': 'str',
'method': 'str',
'start_time': 'int',
'user': 'str',
'user_interface': 'str'
}
attribute_map = {
'name': 'name',
'id': 'id',
'end_time': 'end_time',
'event': 'event',
'event_count': 'event_count',
'location': 'location',
'method': 'method',
'start_time': 'start_time',
'user': 'user',
'user_interface': 'user_interface'
}
required_args = {
}
def __init__(
self,
name=None, # type: str
id=None, # type: str
end_time=None, # type: int
event=None, # type: str
event_count=None, # type: int
location=None, # type: str
method=None, # type: str
start_time=None, # type: int
user=None, # type: str
user_interface=None, # type: str
):
"""
Keyword args:
name (str): Name of the object (e.g., a file system or snapshot).
id (str): A non-modifiable, globally unique ID chosen by the system.
end_time (int): Date and time the user logged out of the Purity//FB interface in milliseconds since UNIX epoch. Set to 0 if the session is still active.
event (str): Description of session events. Valid values include `failed authentication`, `user session`, `login`, `logout`, `API token obtained`, and `request without session`.
event_count (int): Number of session events.
location (str): IP address of the user client connecting to the array or console if connected through local console.
method (str): Method by which the user attempted to log in. Valid values include `API token`, `password`, and `public key`.
start_time (int): Date and time the user logged in to the Purity//FB interface in milliseconds since UNIX epoch.
user (str): Username of the Purity//FB user who triggered the user session event.
user_interface (str): The user interface through which the user session event was performed. Valid values include `CLI`, `GUI`, and `REST`.
"""
if name is not None:
self.name = name
if id is not None:
self.id = id
if end_time is not None:
self.end_time = end_time
if event is not None:
self.event = event
if event_count is not None:
self.event_count = event_count
if location is not None:
self.location = location
if method is not None:
self.method = method
if start_time is not None:
self.start_time = start_time
if user is not None:
self.user = user
if user_interface is not None:
self.user_interface = user_interface
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Session`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Session, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Session):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
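# A hedged, illustrative sketch (not part of the generated client): the model's
# serialization helpers in use. The field values are assumptions chosen only to
# show that attributes left unset are omitted from the resulting dict.
def _example_session_to_dict():
    sess = Session(user='pureuser', event='login', user_interface='REST')
    return sess.to_dict()  # {'user': 'pureuser', 'event': 'login', 'user_interface': 'REST'}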
| 34.186747 | 189 | 0.557885 |
e6415c45202afd1d7a9cf8594723a319133e80f4 | 51,300 | py | Python | openaerostruct/geometry/geometry_mesh_transformations.py | RemyCharayron/OpenAeroStruct | df62a0ef78b444f5bed5505664136f4b3ce51c89 | [
"Apache-2.0"
] | null | null | null | openaerostruct/geometry/geometry_mesh_transformations.py | RemyCharayron/OpenAeroStruct | df62a0ef78b444f5bed5505664136f4b3ce51c89 | [
"Apache-2.0"
] | null | null | null | openaerostruct/geometry/geometry_mesh_transformations.py | RemyCharayron/OpenAeroStruct | df62a0ef78b444f5bed5505664136f4b3ce51c89 | [
"Apache-2.0"
] | null | null | null | """ A set of components that manipulate geometry mesh
based on high-level design parameters. """
import numpy as np
import openmdao.api as om
class Taper(om.ExplicitComponent):
"""
OpenMDAO component that manipulates the mesh by altering the spanwise chord linearly to produce
a tapered wing. Note that we apply taper around the quarter-chord line.
Parameters
----------
taper : float
Taper ratio for the wing; 1 is untapered, 0 goes to a point at the tip.
Returns
-------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the tapered aerodynamic surface.
"""
def initialize(self):
"""
Declare options.
"""
self.options.declare('val',
desc='Initial value for the taper ratio.')
self.options.declare('mesh',
desc='Nodal mesh defining the initial aerodynamic surface.')
self.options.declare('symmetry', default=False,
desc='Flag set to true if surface is reflected about y=0 plane.')
def setup(self):
mesh = self.options['mesh']
val = self.options['val']
self.add_input('taper', val=val)
self.add_output('mesh', val=mesh, units='m')
self.declare_partials('*', '*')
def compute(self, inputs, outputs):
mesh = self.options['mesh']
symmetry = self.options['symmetry']
taper_ratio = inputs['taper'][0]
# Get mesh parameters and the quarter-chord
le = mesh[0]
te = mesh[-1]
num_x, num_y, _ = mesh.shape
quarter_chord = 0.25 * te + 0.75 * le
x = quarter_chord[:, 1]
span = x[-1] - x[0]
# If symmetric, solve for the correct taper ratio, which is a linear
# interpolation problem
if symmetry:
xp = np.array([-span, 0.])
fp = np.array([taper_ratio, 1.])
# Otherwise, we set up an interpolation problem for the entire wing, which
# consists of two linear segments
else:
xp = np.array([-span/2, 0., span/2])
fp = np.array([taper_ratio, 1., taper_ratio])
taper = np.interp(x.real, xp.real, fp.real)
# Modify the mesh based on the taper amount computed per spanwise section
outputs['mesh'] = np.einsum('ijk,j->ijk', mesh - quarter_chord, taper) + quarter_chord
def compute_partials(self, inputs, partials):
mesh = self.options['mesh']
symmetry = self.options['symmetry']
taper_ratio = inputs['taper'][0]
# Get mesh parameters and the quarter-chord
le = mesh[0]
te = mesh[-1]
num_x, num_y, _ = mesh.shape
quarter_chord = 0.25 * te + 0.75 * le
x = quarter_chord[:, 1]
span = x[-1] - x[0]
# If symmetric, solve for the correct taper ratio, which is a linear
# interpolation problem
if symmetry:
xp = np.array([-span, 0.])
fp = np.array([taper_ratio, 1.])
# Otherwise, we set up an interpolation problem for the entire wing, which
# consists of two linear segments
else:
xp = np.array([-span/2, 0., span/2])
fp = np.array([taper_ratio, 1., taper_ratio])
taper = np.interp(x, xp, fp)
if taper_ratio == 1.:
dtaper = np.zeros(taper.shape)
else:
dtaper = (1.0 - taper) / (1.0 - taper_ratio)
partials['mesh', 'taper'] = np.einsum('ijk, j->ijk', mesh - quarter_chord, dtaper)
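# A hedged, runnable sketch: driving Taper on its own. The toy 2x3 left-half
# mesh below is an illustrative assumption, not an OAS-generated mesh;
# taper=0.5 halves the tip chord while leaving the root chord unchanged.
def _demo_taper():  # pragma: no cover
    mesh = np.zeros((2, 3, 3))
    mesh[0, :, 1] = [-2.0, -1.0, 0.0]  # leading edge y, tip to root
    mesh[1, :, 0] = 1.0                # trailing edge sits 1 m behind
    mesh[1, :, 1] = [-2.0, -1.0, 0.0]
    prob = om.Problem()
    prob.model.add_subsystem('taper', Taper(val=0.5, mesh=mesh, symmetry=True))
    prob.setup()
    prob.run_model()
    return prob['taper.mesh']  # chords scale linearly from 0.5 m (tip) to 1 m (root)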
class ScaleX(om.ExplicitComponent):
"""
OpenMDAO component that manipulates the mesh by modifying the chords along the span of the
wing by scaling only the x-coord.
Parameters
----------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the initial aerodynamic surface.
chord[ny] : numpy array
Chord length for each panel edge.
Returns
-------
mesh[nx, ny, 3] : numpy array
Nodal mesh with the new chord lengths.
"""
def initialize(self):
"""
Declare options.
"""
self.options.declare('val', desc='Initial value for chord lengths')
self.options.declare('mesh_shape', desc='Tuple containing mesh shape (nx, ny).')
def setup(self):
mesh_shape = self.options['mesh_shape']
val = self.options['val']
self.add_input('chord', units='m', val=val)
self.add_input('in_mesh', shape=mesh_shape, units='m')
self.add_output('mesh', shape=mesh_shape, units='m')
nx, ny, _ = mesh_shape
nn = nx * ny * 3
rows = np.arange(nn)
col = np.tile(np.zeros(3), ny) + np.repeat(np.arange(ny), 3)
cols = np.tile(col, nx)
self.declare_partials('mesh', 'chord', rows=rows, cols=cols)
p_rows = np.arange(nn)
te_rows = np.arange(((nx-1) * ny * 3))
le_rows = te_rows + ny*3
le_cols = np.tile(np.arange(3 * ny), nx-1)
te_cols = le_cols + ny*3*(nx-1)
rows = np.concatenate([p_rows, te_rows, le_rows])
cols = np.concatenate([p_rows, te_cols, le_cols])
self.declare_partials('mesh', 'in_mesh', rows=rows, cols=cols)
def compute(self, inputs, outputs):
mesh = inputs['in_mesh']
chord_dist = inputs['chord']
te = mesh[-1]
le = mesh[ 0]
quarter_chord = 0.25 * te + 0.75 * le
outputs['mesh'] = np.einsum('ijk,j->ijk', mesh - quarter_chord, chord_dist) + quarter_chord
def compute_partials(self, inputs, partials):
mesh = inputs['in_mesh']
chord_dist = inputs['chord']
te = mesh[-1]
le = mesh[ 0]
quarter_chord = 0.25 * te + 0.75 * le
partials['mesh', 'chord'] = (mesh - quarter_chord).flatten()
nx, ny, _ = mesh.shape
nn = nx * ny * 3
d_mesh = np.einsum('i,ij->ij', chord_dist, np.ones((ny, 3))).flatten()
partials['mesh', 'in_mesh'][:nn] = np.tile(d_mesh, nx)
d_qc = (np.einsum('ij,i->ij', np.ones((ny, 3)), 1.0 - chord_dist)).flatten()
nnq = (nx-1) * ny * 3
partials['mesh', 'in_mesh'][nn:nn + nnq] = np.tile(0.25 * d_qc, nx-1)
partials['mesh', 'in_mesh'][nn + nnq:] = np.tile(0.75 * d_qc, nx-1)
nnq = ny*3
partials['mesh', 'in_mesh'][nn - nnq:nn] += 0.25 * d_qc
partials['mesh', 'in_mesh'][:nnq] += 0.75 * d_qc
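# A hedged, runnable sketch: doubling every chord with ScaleX. The toy mesh and
# chord values are assumptions chosen only to make the effect easy to inspect.
def _demo_scale_x():  # pragma: no cover
    mesh = np.zeros((2, 3, 3))
    mesh[:, :, 1] = [-2.0, -1.0, 0.0]  # spanwise stations
    mesh[1, :, 0] = 1.0                # unit chord everywhere
    prob = om.Problem()
    prob.model.add_subsystem('sx', ScaleX(val=np.full(3, 2.0), mesh_shape=mesh.shape))
    prob.setup()
    prob['sx.in_mesh'] = mesh
    prob.run_model()
    return prob['sx.mesh']  # each section now has a 2 m chord, scaled about its quarter-chord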
class Sweep(om.ExplicitComponent):
"""
OpenMDAO component that manipulates the mesh applying shearing sweep. Positive sweeps back.
Parameters
----------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the initial aerodynamic surface.
sweep : float
Shearing sweep angle in degrees.
symmetry : boolean
Flag set to true if surface is reflected about y=0 plane.
Returns
-------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the swept aerodynamic surface.
"""
def initialize(self):
"""
Declare options.
"""
self.options.declare('val',
desc='Initial value for x shear.')
self.options.declare('mesh_shape', desc='Tuple containing mesh shape (nx, ny).')
self.options.declare('symmetry', default=False,
desc='Flag set to true if surface is reflected about y=0 plane.')
def setup(self):
mesh_shape = self.options['mesh_shape']
val = self.options['val']
self.add_input('sweep', val=val, units='deg')
self.add_input('in_mesh', shape=mesh_shape, units='m')
self.add_output('mesh', shape=mesh_shape, units='m')
nx, ny, _ = mesh_shape
nn = nx * ny
rows = 3 * np.arange(nn)
cols = np.zeros(nn)
self.declare_partials('mesh', 'sweep', rows=rows, cols=cols)
nn = nx * ny * 3
n_rows = np.arange(nn)
if self.options['symmetry']:
y_cp = ny*3 - 2
te_cols = np.tile(y_cp, nx * (ny-1))
te_rows = np.tile(3 * np.arange(ny-1), nx) + np.repeat(3*ny*np.arange(nx), ny-1)
se_cols = np.tile(3 * np.arange(ny-1) + 1, nx)
else:
y_cp = 3*(ny+1) // 2 - 2
n_sym = (ny-1) // 2
te_row = np.tile(3*np.arange(n_sym), 2) + np.repeat([0, 3*(n_sym+1)], n_sym)
te_rows = np.tile(te_row, nx) + np.repeat(3*ny*np.arange(nx), ny-1)
te_col = np.tile(y_cp, n_sym)
se_col1 = 3*np.arange(n_sym) + 1
se_col2 = 3*np.arange(n_sym) + 4 + 3*n_sym
# neat trick: swap columns on reflected side so we can assign in just two operations
te_cols = np.tile(np.concatenate([te_col, se_col2]), nx)
se_cols = np.tile(np.concatenate([se_col1, te_col]), nx)
rows = np.concatenate(([n_rows, te_rows, te_rows]))
cols = np.concatenate(([n_rows, te_cols, se_cols]))
self.declare_partials('mesh', 'in_mesh', rows=rows, cols=cols)
def compute(self, inputs, outputs):
symmetry = self.options['symmetry']
sweep_angle = inputs['sweep'][0]
mesh = inputs['in_mesh']
# Get the mesh parameters and desired sweep angle
nx, ny, _ = mesh.shape
le = mesh[0]
p180 = np.pi / 180
tan_theta = np.tan(p180*sweep_angle)
# If symmetric, simply vary the x-coord based on the distance from the
# center of the wing
if symmetry:
y0 = le[-1, 1]
dx = -(le[:, 1] - y0) * tan_theta
# Else, vary the x-coord on either side of the wing
else:
ny2 = (ny - 1) // 2
y0 = le[ny2, 1]
dx_right = (le[ny2:, 1] - y0) * tan_theta
dx_left = -(le[:ny2, 1] - y0) * tan_theta
dx = np.hstack((dx_left, dx_right))
# dx added spanwise.
outputs['mesh'][:] = mesh
outputs['mesh'][:, :, 0] += dx
def compute_partials(self, inputs, partials):
symmetry = self.options['symmetry']
sweep_angle = inputs['sweep'][0]
mesh = inputs['in_mesh']
# Get the mesh parameters and desired sweep angle
nx, ny, _ = mesh.shape
le = mesh[0]
p180 = np.pi / 180
tan_theta = np.tan(p180*sweep_angle)
dtan_dtheta = p180 / np.cos(p180*sweep_angle)**2
# If symmetric, simply vary the x-coord based on the distance from the
# center of the wing
if symmetry:
y0 = le[-1, 1]
dx_dtheta = -(le[:, 1] - y0)
# Else, vary the x-coord on either side of the wing
else:
ny2 = (ny - 1) // 2
y0 = le[ny2, 1]
dx_dtheta_right = (le[ny2:, 1] - y0)
dx_dtheta_left = -(le[:ny2, 1] - y0)
dx_dtheta = np.hstack((dx_dtheta_left, dx_dtheta_right))
partials['mesh', 'sweep'] = np.tile(dx_dtheta * dtan_dtheta, nx)
nn = nx * ny * 3
partials['mesh', 'in_mesh'][:nn] = 1.0
nn2 = nx * (ny-1)
partials['mesh', 'in_mesh'][nn:nn + nn2] = tan_theta
partials['mesh', 'in_mesh'][nn + nn2:] = -tan_theta
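# A hedged, runnable sketch: shear-sweeping a toy symmetric mesh back by 30
# degrees. Only x changes, by tan(30 deg) times the distance from the root.
def _demo_sweep():  # pragma: no cover
    mesh = np.zeros((2, 3, 3))
    mesh[:, :, 1] = [-2.0, -1.0, 0.0]
    mesh[1, :, 0] = 1.0
    prob = om.Problem()
    prob.model.add_subsystem('sw', Sweep(val=30.0, mesh_shape=mesh.shape, symmetry=True))
    prob.setup()
    prob['sw.in_mesh'] = mesh
    prob.run_model()
    return prob['sw.mesh']  # tip x shifted back by 2 * tan(30 deg) ~ 1.155 m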
class ShearX(om.ExplicitComponent):
"""
OpenMDAO component that manipulates the mesh by shearing the wing in the x direction
(distributed sweep).
Parameters
----------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the initial aerodynamic surface.
xshear[ny] : numpy array
Distance to translate wing in x direction.
Returns
-------
mesh[nx, ny, 3] : numpy array
Nodal mesh with the new chord lengths.
"""
def initialize(self):
"""
Declare options.
"""
self.options.declare('val', desc='Initial value for x shear.')
self.options.declare('mesh_shape', desc='Tuple containing mesh shape (nx, ny).')
def setup(self):
mesh_shape = self.options['mesh_shape']
val = self.options['val']
self.add_input('xshear', val=val, units='m')
self.add_input('in_mesh', shape=mesh_shape, units='m')
self.add_output('mesh', shape=mesh_shape, units='m')
nx, ny, _ = mesh_shape
nn = nx * ny
        rows = 3 * np.arange(nn)
cols = np.tile(np.arange(ny), nx)
val = np.ones(nn)
self.declare_partials('mesh', 'xshear', rows=rows, cols=cols, val=val)
nn = nx * ny * 3
rows = np.arange(nn)
cols = np.arange(nn)
val = np.ones(nn)
self.declare_partials('mesh', 'in_mesh', rows=rows, cols=cols, val=val)
def compute(self, inputs, outputs):
outputs['mesh'][:] = inputs['in_mesh']
outputs['mesh'][:, :, 0] += inputs['xshear']
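# A hedged, runnable sketch: a distributed (per-station) x translation with
# ShearX; the xshear values are arbitrary assumptions. ShearY and ShearZ below
# follow the same pattern for the y and z coordinates.
def _demo_shear_x():  # pragma: no cover
    mesh = np.zeros((2, 3, 3))
    mesh[:, :, 1] = [-2.0, -1.0, 0.0]
    mesh[1, :, 0] = 1.0
    prob = om.Problem()
    prob.model.add_subsystem('shx', ShearX(val=np.array([0.5, 0.2, 0.0]), mesh_shape=mesh.shape))
    prob.setup()
    prob['shx.in_mesh'] = mesh
    prob.run_model()
    return prob['shx.mesh']  # each spanwise station slid in x by its xshear entry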
class Stretch(om.ExplicitComponent):
"""
    OpenMDAO component that manipulates the mesh by stretching it in the spanwise direction to
    reach the specified span.
Parameters
----------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the initial aerodynamic surface.
span : float
        Target full span of the wing; internally divided by 2 when symmetry is enabled.
symmetry : boolean
Flag set to true if surface is reflected about y=0 plane.
Returns
-------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the stretched aerodynamic surface.
"""
def initialize(self):
"""
Declare options.
"""
self.options.declare('val', desc='Initial value for span.')
self.options.declare('mesh_shape', desc='Tuple containing mesh shape (nx, ny).')
self.options.declare('symmetry', default=False,
desc='Flag set to true if surface is reflected about y=0 plane.')
def setup(self):
mesh_shape = self.options['mesh_shape']
val = self.options['val']
self.add_input('span', val=val, units='m')
self.add_input('in_mesh', shape=mesh_shape, units='m')
self.add_output('mesh', shape=mesh_shape, units='m')
nx, ny, _ = mesh_shape
nn = nx * ny
rows = 3 * np.arange(nn) + 1
cols = np.zeros(nn)
self.declare_partials('mesh', 'span', rows=rows, cols=cols)
        # First: x and z on the diagonal are identity.
nn = nx * ny
xz_diag = 3*np.arange(nn)
# Four columns at le (root, tip) and te (root, tip)
i_le0 = 1
i_le1 = ny*3 - 2
i_te0 = (nx-1)*ny*3 + 1
i_te1 = nn*3 - 2
rows_4c = np.tile(3*np.arange(nn) + 1, 4)
cols_4c = np.concatenate([np.tile(i_le0, nn),
np.tile(i_le1, nn),
np.tile(i_te0, nn),
np.tile(i_te1, nn)
])
# Diagonal stripes
base = 3*np.arange(1, ny-1) + 1
row_dg = np.tile(base, nx) + np.repeat(ny*3*np.arange(nx), ny-2)
rows_dg = np.tile(row_dg, 2)
col_dg = np.tile(base, nx)
cols_dg = np.concatenate([col_dg, col_dg + 3*ny*(nx-1)])
rows = np.concatenate([xz_diag, xz_diag + 2, rows_4c, rows_dg])
cols = np.concatenate([xz_diag, xz_diag + 2, cols_4c, cols_dg])
self.declare_partials('mesh', 'in_mesh', rows=rows, cols=cols)
def compute(self, inputs, outputs):
symmetry = self.options['symmetry']
span = inputs['span'][0]
mesh = inputs['in_mesh']
# Set the span along the quarter-chord line
le = mesh[0]
te = mesh[-1]
quarter_chord = 0.25 * te + 0.75 * le
# The user always deals with the full span, so if they input a specific
# span value and have symmetry enabled, we divide this value by 2.
if symmetry:
span /= 2.
# Compute the previous span and determine the scalar needed to reach the
# desired span
prev_span = quarter_chord[-1, 1] - quarter_chord[0, 1]
s = quarter_chord[:, 1] / prev_span
outputs['mesh'][:] = mesh
outputs['mesh'][:, :, 1] = s * span
def compute_partials(self, inputs, partials):
symmetry = self.options['symmetry']
span = inputs['span'][0]
mesh = inputs['in_mesh']
nx, ny, _ = mesh.shape
# Set the span along the quarter-chord line
le = mesh[0]
te = mesh[-1]
quarter_chord = 0.25 * te + 0.75 * le
# The user always deals with the full span, so if they input a specific
# span value and have symmetry enabled, we divide this value by 2.
if symmetry:
span /= 2.
# Compute the previous span and determine the scalar needed to reach the
# desired span
prev_span = quarter_chord[-1, 1] - quarter_chord[0, 1]
s = quarter_chord[:, 1] / prev_span
d_prev_span = -quarter_chord[:, 1] / prev_span**2
d_prev_span_qc0 = np.zeros((ny, ))
d_prev_span_qc1 = np.zeros((ny, ))
d_prev_span_qc0[0] = d_prev_span_qc1[-1] = 1.0 / prev_span
if symmetry:
partials['mesh', 'span'] = np.tile(0.5 * s, nx)
else:
partials['mesh', 'span'] = np.tile(s, nx)
nn = nx * ny * 2
partials['mesh', 'in_mesh'][:nn] = 1.0
nn2 = nx * ny
partials['mesh', 'in_mesh'][nn:nn + nn2] = np.tile(-0.75 * span * (d_prev_span - d_prev_span_qc0), nx)
nn3 = nn + nn2 * 2
partials['mesh', 'in_mesh'][nn + nn2:nn3] = np.tile(0.75 * span * (d_prev_span + d_prev_span_qc1), nx)
nn4 = nn3 + nn2
partials['mesh', 'in_mesh'][nn3:nn4] = np.tile(-0.25 * span * (d_prev_span - d_prev_span_qc0), nx)
nn5 = nn4 + nn2
partials['mesh', 'in_mesh'][nn4:nn5] = np.tile(0.25 * span * (d_prev_span + d_prev_span_qc1), nx)
nn6 = nn5 + nx*(ny-2)
partials['mesh', 'in_mesh'][nn5:nn6] = 0.75 * span / prev_span
partials['mesh', 'in_mesh'][nn6:] = 0.25 * span / prev_span
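# A hedged, runnable sketch: stretching a toy symmetric half-mesh to a 10 m
# full span. With symmetry=True the component halves the requested span
# internally, so the half-mesh ends up 5 m wide.
def _demo_stretch():  # pragma: no cover
    mesh = np.zeros((2, 3, 3))
    mesh[:, :, 1] = [-2.0, -1.0, 0.0]
    mesh[1, :, 0] = 1.0
    prob = om.Problem()
    prob.model.add_subsystem('st', Stretch(val=10.0, mesh_shape=mesh.shape, symmetry=True))
    prob.setup()
    prob['st.in_mesh'] = mesh
    prob.run_model()
    return prob['st.mesh']  # quarter-chord y now runs from -5 m to 0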
class ShearY(om.ExplicitComponent):
"""
OpenMDAO component that manipulates the mesh by shearing the wing in the y direction
(distributed sweep).
Parameters
----------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the initial aerodynamic surface.
yshear[ny] : numpy array
Distance to translate wing in y direction.
Returns
-------
mesh[nx, ny, 3] : numpy array
Nodal mesh with the new chord lengths.
"""
def initialize(self):
"""
Declare options.
"""
self.options.declare('val', desc='Initial value for y shear.')
self.options.declare('mesh_shape', desc='Tuple containing mesh shape (nx, ny).')
def setup(self):
mesh_shape = self.options['mesh_shape']
val = self.options['val']
self.add_input('yshear', val=val, units='m')
self.add_input('in_mesh', shape=mesh_shape, units='m')
self.add_output('mesh', shape=mesh_shape, units='m')
nx, ny, _ = mesh_shape
nn = nx * ny
        rows = 3 * np.arange(nn) + 1
cols = np.tile(np.arange(ny), nx)
val = np.ones(nn)
self.declare_partials('mesh', 'yshear', rows=rows, cols=cols, val=val)
nn = nx * ny * 3
rows = np.arange(nn)
cols = np.arange(nn)
val = np.ones(nn)
self.declare_partials('mesh', 'in_mesh', rows=rows, cols=cols, val=val)
def compute(self, inputs, outputs):
outputs['mesh'][:] = inputs['in_mesh']
outputs['mesh'][:, :, 1] += inputs['yshear']
class Dihedral(om.ExplicitComponent):
"""
OpenMDAO component that manipulates the mesh by applying dihedral angle. Positive angles up.
Parameters
----------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the initial aerodynamic surface.
dihedral : float
Dihedral angle in degrees.
symmetry : boolean
Flag set to true if surface is reflected about y=0 plane.
Returns
-------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the aerodynamic surface with dihedral angle.
"""
def initialize(self):
"""
Declare options.
"""
self.options.declare('val', desc='Initial value for dihedral.')
self.options.declare('mesh_shape', desc='Tuple containing mesh shape (nx, ny).')
self.options.declare('symmetry', default=False,
desc='Flag set to true if surface is reflected about y=0 plane.')
def setup(self):
mesh_shape = self.options['mesh_shape']
val = self.options['val']
self.add_input('dihedral', val=val, units='deg')
self.add_input('in_mesh', shape=mesh_shape, units='m')
self.add_output('mesh', shape=mesh_shape, units='m')
nx, ny, _ = mesh_shape
nn = nx*ny
rows = 3*np.arange(nn) + 2
cols = np.zeros(nn)
self.declare_partials('mesh', 'dihedral', rows=rows, cols=cols)
nn = nx * ny * 3
n_rows = np.arange(nn)
if self.options['symmetry']:
y_cp = ny*3 - 2
te_cols = np.tile(y_cp, nx * (ny-1))
te_rows = np.tile(3 * np.arange(ny-1) + 2, nx) + np.repeat(3*ny*np.arange(nx), ny-1)
se_cols = np.tile(3 * np.arange(ny-1) + 1, nx)
else:
y_cp = 3*(ny+1) // 2 - 2
n_sym = (ny-1) // 2
te_row = np.tile(3*np.arange(n_sym) + 2, 2) + np.repeat([0, 3*(n_sym+1)], n_sym)
te_rows = np.tile(te_row, nx) + np.repeat(3*ny*np.arange(nx), ny-1)
te_col = np.tile(y_cp, n_sym)
se_col1 = 3*np.arange(n_sym) + 1
se_col2 = 3*np.arange(n_sym) + 4 + 3*n_sym
# neat trick: swap columns on reflected side so we can assign in just two operations
te_cols = np.tile(np.concatenate([te_col, se_col2]), nx)
se_cols = np.tile(np.concatenate([se_col1, te_col]), nx)
rows = np.concatenate(([n_rows, te_rows, te_rows]))
cols = np.concatenate(([n_rows, te_cols, se_cols]))
self.declare_partials('mesh', 'in_mesh', rows=rows, cols=cols)
def compute(self, inputs, outputs):
symmetry = self.options['symmetry']
dihedral_angle = inputs['dihedral'][0]
mesh = inputs['in_mesh']
# Get the mesh parameters and desired sweep angle
_, ny, _ = mesh.shape
le = mesh[0]
p180 = np.pi / 180
tan_theta = np.tan(p180 * dihedral_angle)
# If symmetric, simply vary the z-coord based on the distance from the
# center of the wing
if symmetry:
y0 = le[-1, 1]
dz = -(le[:, 1] - y0) * tan_theta
else:
ny2 = (ny-1) // 2
y0 = le[ny2, 1]
dz_right = (le[ny2:, 1] - y0) * tan_theta
dz_left = -(le[:ny2, 1] - y0) * tan_theta
dz = np.hstack((dz_left, dz_right))
# dz added spanwise.
outputs['mesh'][:] = mesh
outputs['mesh'][:, :, 2] += dz
def compute_partials(self, inputs, partials):
symmetry = self.options['symmetry']
dihedral_angle = inputs['dihedral'][0]
mesh = inputs['in_mesh']
# Get the mesh parameters and desired sweep angle
nx, ny, _ = mesh.shape
le = mesh[0]
p180 = np.pi / 180
tan_theta = np.tan(p180 * dihedral_angle)
dtan_dangle = p180 / np.cos(p180*dihedral_angle)**2
# If symmetric, simply vary the z-coord based on the distance from the
# center of the wing
if symmetry:
y0 = le[-1, 1]
dz_dtheta = -(le[:, 1] - y0) * dtan_dangle
else:
ny2 = (ny-1) // 2
y0 = le[ny2, 1]
dz_right = (le[ny2:, 1] - y0) * tan_theta
dz_left = -(le[:ny2, 1] - y0) * tan_theta
ddz_right = (le[ny2:, 1] - y0) * dtan_dangle
ddz_left = -(le[:ny2, 1] - y0) * dtan_dangle
dz_dtheta = np.hstack((ddz_left, ddz_right))
# dz added spanwise.
partials['mesh', 'dihedral'] = np.tile(dz_dtheta, nx)
nn = nx * ny * 3
partials['mesh', 'in_mesh'][:nn] = 1.0
nn2 = nx * (ny-1)
partials['mesh', 'in_mesh'][nn:nn + nn2] = tan_theta
partials['mesh', 'in_mesh'][nn + nn2:] = -tan_theta
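# A hedged, runnable sketch: tilting a toy symmetric mesh up with a 5 degree
# dihedral; z grows by tan(5 deg) per meter of distance from the root.
def _demo_dihedral():  # pragma: no cover
    mesh = np.zeros((2, 3, 3))
    mesh[:, :, 1] = [-2.0, -1.0, 0.0]
    mesh[1, :, 0] = 1.0
    prob = om.Problem()
    prob.model.add_subsystem('dih', Dihedral(val=5.0, mesh_shape=mesh.shape, symmetry=True))
    prob.setup()
    prob['dih.in_mesh'] = mesh
    prob.run_model()
    return prob['dih.mesh']  # tip raised by 2 * tan(5 deg) ~ 0.175 m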
class ShearZ(om.ExplicitComponent):
"""
OpenMDAO component that manipulates the mesh by shearing the wing in the z direction
(distributed sweep).
Parameters
----------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the initial aerodynamic surface.
zshear[ny] : numpy array
Distance to translate wing in z direction.
Returns
-------
mesh[nx, ny, 3] : numpy array
Nodal mesh with the new chord lengths.
"""
def initialize(self):
"""
Declare options.
"""
self.options.declare('val', desc='Initial value for z shear.')
self.options.declare('mesh_shape', desc='Tuple containing mesh shape (nx, ny).')
def setup(self):
mesh_shape = self.options['mesh_shape']
val = self.options['val']
self.add_input('zshear', val=val, units='m')
self.add_input('in_mesh', shape=mesh_shape, units='m')
self.add_output('mesh', shape=mesh_shape, units='m')
nx, ny, _ = mesh_shape
nn = nx * ny
        rows = 3 * np.arange(nn) + 2
cols = np.tile(np.arange(ny), nx)
val = np.ones(nn)
self.declare_partials('mesh', 'zshear', rows=rows, cols=cols, val=val)
nn = nx * ny * 3
rows = np.arange(nn)
cols = np.arange(nn)
val = np.ones(nn)
self.declare_partials('mesh', 'in_mesh', rows=rows, cols=cols, val=val)
def compute(self, inputs, outputs):
outputs['mesh'][:] = inputs['in_mesh']
outputs['mesh'][:, :, 2] += inputs['zshear']
class Rotate(om.ExplicitComponent):
"""
OpenMDAO component that manipulates the mesh by compute rotation matrices given mesh and
rotation angles in degrees.
Parameters
----------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the initial aerodynamic surface.
    twist[ny] : numpy array
        1-D array of rotation (twist) angles about the y-axis for each wing slice in degrees.
symmetry : boolean
Flag set to True if surface is reflected about y=0 plane.
rotate_x : boolean
Flag set to True if the user desires the twist variable to always be
applied perpendicular to the wing (say, in the case of a winglet).
Returns
-------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the twisted aerodynamic surface.
"""
def initialize(self):
"""
Declare options.
"""
        self.options.declare('val', desc='Initial value for twist.')
self.options.declare('mesh_shape', desc='Tuple containing mesh shape (nx, ny).')
self.options.declare('symmetry', default=False,
desc='Flag set to true if surface is reflected about y=0 plane.')
self.options.declare('rotate_x', default=True,
desc='Flag set to True if the user desires the twist variable to '
'always be applied perpendicular to the wing (say, in the case of '
'a winglet).')
def setup(self):
mesh_shape = self.options['mesh_shape']
val = self.options['val']
self.add_input('twist', val=val, units='deg')
self.add_input('in_mesh', shape=mesh_shape, units='m')
self.add_output('mesh', shape=mesh_shape, units='m')
nx, ny, _ = mesh_shape
nn = nx*ny*3
rows = np.arange(nn)
col = np.tile(np.zeros(3), ny) + np.repeat(np.arange(ny), 3)
cols = np.tile(col, nx)
self.declare_partials('mesh', 'twist', rows=rows, cols=cols)
row_base = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
col_base = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
# Diagonal
nn = nx*ny
dg_row = np.tile(row_base, nn) + np.repeat(3*np.arange(nn), 9)
dg_col = np.tile(col_base, nn) + np.repeat(3*np.arange(nn), 9)
# Leading and Trailing edge on diagonal terms.
row_base_y = np.tile(row_base, ny) + np.repeat(3*np.arange(ny), 9)
col_base_y = np.tile(col_base, ny) + np.repeat(3*np.arange(ny), 9)
nn2 = 3*ny
te_dg_row = np.tile(row_base_y, nx-1) + np.repeat(nn2*np.arange(nx-1), 9*ny)
le_dg_col = np.tile(col_base_y, nx-1)
le_dg_row = te_dg_row + nn2
te_dg_col = le_dg_col + 3 * ny * (nx-1)
# Leading and Trailing edge off diagonal terms.
if self.options['symmetry']:
row_base_y = np.tile(row_base, ny-1) + np.repeat(3*np.arange(ny-1), 9)
col_base_y = np.tile(col_base + 3, ny-1) + np.repeat(3*np.arange(ny-1), 9)
nn2 = 3*ny
te_od_row = np.tile(row_base_y, nx) + np.repeat(nn2*np.arange(nx), 9*(ny-1))
le_od_col = np.tile(col_base_y, nx)
te_od_col = le_od_col + 3 * ny * (nx-1)
rows = np.concatenate([dg_row, le_dg_row, te_dg_row, te_od_row, te_od_row])
cols = np.concatenate([dg_col, le_dg_col, te_dg_col, le_od_col, te_od_col])
else:
n_sym = (ny-1) // 2
row_base_y1 = np.tile(row_base, n_sym) + np.repeat(3*np.arange(n_sym), 9)
col_base_y1 = np.tile(col_base + 3, n_sym) + np.repeat(3*np.arange(n_sym), 9)
row_base_y2 = row_base_y1 + 3*n_sym + 3
col_base_y2 = col_base_y1 + 3*n_sym - 3
nn2 = 3*ny
te_od_row1 = np.tile(row_base_y1, nx) + np.repeat(nn2*np.arange(nx), 9*n_sym)
le_od_col1 = np.tile(col_base_y1, nx)
te_od_col1 = le_od_col1 + 3 * ny * (nx-1)
te_od_row2 = np.tile(row_base_y2, nx) + np.repeat(nn2*np.arange(nx), 9*n_sym)
le_od_col2 = np.tile(col_base_y2, nx)
te_od_col2 = le_od_col2 + 3 * ny * (nx-1)
rows = np.concatenate([dg_row, le_dg_row, te_dg_row, te_od_row1, te_od_row2, te_od_row1, te_od_row2])
cols = np.concatenate([dg_col, le_dg_col, te_dg_col, le_od_col1, le_od_col2, te_od_col1, te_od_col2])
self.declare_partials('mesh', 'in_mesh', rows=rows, cols=cols)
def compute(self, inputs, outputs):
symmetry = self.options['symmetry']
rotate_x = self.options['rotate_x']
theta_y = inputs['twist']
mesh = inputs['in_mesh']
te = mesh[-1]
le = mesh[ 0]
quarter_chord = 0.25 * te + 0.75 * le
_, ny, _ = mesh.shape
if rotate_x:
# Compute spanwise z displacements along quarter chord
if symmetry:
dz_qc = quarter_chord[:-1, 2] - quarter_chord[1:, 2]
dy_qc = quarter_chord[:-1, 1] - quarter_chord[1:, 1]
theta_x = np.arctan(dz_qc/dy_qc)
# Prepend with 0 so that root is not rotated
rad_theta_x = np.append(theta_x, 0.0)
else:
root_index = int((ny - 1) / 2)
dz_qc_left = quarter_chord[:root_index,2] - quarter_chord[1:root_index+1,2]
dy_qc_left = quarter_chord[:root_index,1] - quarter_chord[1:root_index+1,1]
theta_x_left = np.arctan(dz_qc_left/dy_qc_left)
dz_qc_right = quarter_chord[root_index+1:,2] - quarter_chord[root_index:-1,2]
dy_qc_right = quarter_chord[root_index+1:,1] - quarter_chord[root_index:-1,1]
theta_x_right = np.arctan(dz_qc_right/dy_qc_right)
# Concatenate thetas
rad_theta_x = np.concatenate((theta_x_left, np.zeros(1), theta_x_right))
else:
rad_theta_x = 0.0
rad_theta_y = theta_y * np.pi / 180.
mats = np.zeros((ny, 3, 3), dtype=type(rad_theta_y[0]))
cos_rtx = np.cos(rad_theta_x)
cos_rty = np.cos(rad_theta_y)
sin_rtx = np.sin(rad_theta_x)
sin_rty = np.sin(rad_theta_y)
mats[:, 0, 0] = cos_rty
mats[:, 0, 2] = sin_rty
mats[:, 1, 0] = sin_rtx * sin_rty
mats[:, 1, 1] = cos_rtx
mats[:, 1, 2] = -sin_rtx * cos_rty
mats[:, 2, 0] = -cos_rtx * sin_rty
mats[:, 2, 1] = sin_rtx
mats[:, 2, 2] = cos_rtx*cos_rty
outputs['mesh'] = np.einsum("ikj, mij -> mik", mats, mesh - quarter_chord) + quarter_chord
def compute_partials(self, inputs, partials):
symmetry = self.options['symmetry']
rotate_x = self.options['rotate_x']
theta_y = inputs['twist']
mesh = inputs['in_mesh']
te = mesh[-1]
le = mesh[ 0]
quarter_chord = 0.25 * te + 0.75 * le
nx, ny, _ = mesh.shape
if rotate_x:
# Compute spanwise z displacements along quarter chord
if symmetry:
dz_qc = quarter_chord[:-1,2] - quarter_chord[1:,2]
dy_qc = quarter_chord[:-1,1] - quarter_chord[1:,1]
theta_x = np.arctan(dz_qc/dy_qc)
# Prepend with 0 so that root is not rotated
rad_theta_x = np.append(theta_x, 0.0)
fact = 1.0/(1.0 + (dz_qc/dy_qc)**2)
dthx_dq = np.zeros((ny, 3))
dthx_dq[:-1, 1] = -dz_qc * fact / dy_qc**2
dthx_dq[:-1, 2] = fact / dy_qc
else:
root_index = int((ny - 1) / 2)
dz_qc_left = quarter_chord[:root_index,2] - quarter_chord[1:root_index+1,2]
dy_qc_left = quarter_chord[:root_index,1] - quarter_chord[1:root_index+1,1]
theta_x_left = np.arctan(dz_qc_left/dy_qc_left)
dz_qc_right = quarter_chord[root_index+1:,2] - quarter_chord[root_index:-1,2]
dy_qc_right = quarter_chord[root_index+1:,1] - quarter_chord[root_index:-1,1]
theta_x_right = np.arctan(dz_qc_right/dy_qc_right)
# Concatenate thetas
rad_theta_x = np.concatenate((theta_x_left, np.zeros(1), theta_x_right))
fact_left = 1.0/(1.0 + (dz_qc_left/dy_qc_left)**2)
fact_right = 1.0/(1.0 + (dz_qc_right/dy_qc_right)**2)
dthx_dq = np.zeros((ny, 3))
dthx_dq[:root_index, 1] = -dz_qc_left * fact_left / dy_qc_left**2
dthx_dq[root_index+1:, 1] = -dz_qc_right * fact_right / dy_qc_right**2
dthx_dq[:root_index, 2] = fact_left / dy_qc_left
dthx_dq[root_index+1:, 2] = fact_right / dy_qc_right
else:
rad_theta_x = 0.0
deg2rad = np.pi / 180.
rad_theta_y = theta_y * deg2rad
mats = np.zeros((ny, 3, 3), dtype=type(rad_theta_y[0]))
cos_rtx = np.cos(rad_theta_x)
cos_rty = np.cos(rad_theta_y)
sin_rtx = np.sin(rad_theta_x)
sin_rty = np.sin(rad_theta_y)
mats[:, 0, 0] = cos_rty
mats[:, 0, 2] = sin_rty
mats[:, 1, 0] = sin_rtx * sin_rty
mats[:, 1, 1] = cos_rtx
mats[:, 1, 2] = -sin_rtx * cos_rty
mats[:, 2, 0] = -cos_rtx * sin_rty
mats[:, 2, 1] = sin_rtx
mats[:, 2, 2] = cos_rtx*cos_rty
dmats_dthy = np.zeros((ny, 3, 3))
dmats_dthy[:, 0, 0] = -sin_rty * deg2rad
dmats_dthy[:, 0, 2] = cos_rty * deg2rad
dmats_dthy[:, 1, 0] = sin_rtx * cos_rty * deg2rad
dmats_dthy[:, 1, 2] = sin_rtx * sin_rty * deg2rad
dmats_dthy[:, 2, 0] = -cos_rtx * cos_rty * deg2rad
dmats_dthy[:, 2, 2] = -cos_rtx * sin_rty * deg2rad
d_dthetay = np.einsum("ikj, mij -> mik", dmats_dthy, mesh - quarter_chord)
partials['mesh', 'twist'] = d_dthetay.flatten()
nn = nx*ny*9
partials['mesh', 'in_mesh'][:nn] = np.tile(mats.flatten(), nx)
# Quarter chord direct contribution.
eye = np.tile(np.eye(3).flatten(), ny).reshape(ny, 3, 3)
d_qch = (eye - mats).flatten()
nqc = ny*9
partials['mesh', 'in_mesh'][:nqc] += 0.75 * d_qch
partials['mesh', 'in_mesh'][nn -nqc:nn] += 0.25 * d_qch
if rotate_x:
dmats_dthx = np.zeros((ny, 3, 3))
dmats_dthx[:, 1, 0] = cos_rtx * sin_rty
dmats_dthx[:, 1, 1] = -sin_rtx
dmats_dthx[:, 1, 2] = -cos_rtx * cos_rty
dmats_dthx[:, 2, 0] = sin_rtx * sin_rty
dmats_dthx[:, 2, 1] = cos_rtx
dmats_dthx[:, 2, 2] = -sin_rtx * cos_rty
d_dthetax = np.einsum("ikj, mij -> mik", dmats_dthx, mesh - quarter_chord)
d_dq = np.einsum("ijk, jm -> ijkm", d_dthetax, dthx_dq)
d_dq_flat = d_dq.flatten()
del_n = (nn - 9*ny)
nn2 = nn + del_n
nn3 = nn2 + del_n
partials['mesh', 'in_mesh'][nn:nn2] = 0.75 * d_dq_flat[-del_n:]
partials['mesh', 'in_mesh'][nn2:nn3] = 0.25 * d_dq_flat[:del_n]
# Contribution back to main diagonal.
del_n = 9*ny
partials['mesh', 'in_mesh'][:nqc] += 0.75 * d_dq_flat[:del_n]
partials['mesh', 'in_mesh'][nn-nqc:nn] += 0.25 * d_dq_flat[-del_n:]
# Quarter chord direct contribution.
d_qch_od = np.tile(d_qch.flatten(), nx-1)
partials['mesh', 'in_mesh'][nn:nn2] += 0.75 * d_qch_od
partials['mesh', 'in_mesh'][nn2:nn3] += 0.25 * d_qch_od
# off-off diagonal pieces
if symmetry:
d_dq_flat = d_dq[:, :-1, :, :].flatten()
del_n = (nn - 9*nx)
nn4 = nn3 + del_n
partials['mesh', 'in_mesh'][nn3:nn4] = -0.75 * d_dq_flat
nn5 = nn4 + del_n
partials['mesh', 'in_mesh'][nn4:nn5] = -0.25 * d_dq_flat
else:
d_dq_flat1 = d_dq[:, :root_index, :, :].flatten()
d_dq_flat2 = d_dq[:, root_index + 1:, :, :].flatten()
del_n = nx * root_index * 9
nn4 = nn3 + del_n
partials['mesh', 'in_mesh'][nn3:nn4] = -0.75 * d_dq_flat1
nn5 = nn4 + del_n
partials['mesh', 'in_mesh'][nn4:nn5] = -0.75 * d_dq_flat2
nn6 = nn5 + del_n
partials['mesh', 'in_mesh'][nn5:nn6] = -0.25 * d_dq_flat1
nn7 = nn6 + del_n
partials['mesh', 'in_mesh'][nn6:nn7] = -0.25 * d_dq_flat2
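# A hedged, runnable sketch: twisting a toy symmetric mesh and checking the
# hand-derived partials above with complex step. The twist distribution is an
# arbitrary assumption; force_alloc_complex=True is required for method='cs'.
def _demo_rotate():  # pragma: no cover
    mesh = np.zeros((2, 3, 3))
    mesh[:, :, 1] = [-2.0, -1.0, 0.0]
    mesh[1, :, 0] = 1.0
    prob = om.Problem()
    prob.model.add_subsystem('rot', Rotate(val=np.array([3.0, 1.5, 0.0]),
                                           mesh_shape=mesh.shape, symmetry=True))
    prob.setup(force_alloc_complex=True)
    prob['rot.in_mesh'] = mesh
    prob.run_model()
    data = prob.check_partials(method='cs', compact_print=True)
    return prob['rot.mesh'], data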
class Dihedral_distrib(om.ExplicitComponent):
"""
    OpenMDAO component that manipulates the mesh by applying an individual dihedral angle to each
    spanwise panel of the wing (unlike the Dihedral component, which applies a single dihedral
    angle to the whole wing). Positive angles up.
Parameters
----------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the initial aerodynamic surface.
dihedral_distrib[ny-1] : numpy array
        1-D array of dihedral angles, one per wing panel, in degrees.
        Note that the size is ny-1 because the angles apply to panels, not to nodes.
symmetry : boolean
Flag set to true if surface is reflected about y=0 plane.
Returns
-------
mesh[nx, ny, 3] : numpy array
Nodal mesh defining the aerodynamic surface with dihedral angles on each panels.
"""
def initialize(self):
"""
Declare options.
"""
self.options.declare('val', desc='Initial value for dihedral_distrib.')
self.options.declare('mesh_shape', desc='Tuple containing mesh shape (nx, ny).')
self.options.declare('symmetry', default=False,
desc='Flag set to true if surface is reflected about y=0 plane.')
def setup(self):
mesh_shape = self.options['mesh_shape']
val = self.options['val']
symmetry = self.options['symmetry']
self.add_input('dihedral_distrib', val=val, units='deg')
self.add_input('in_mesh', shape=mesh_shape, units='m')
self.add_output('mesh', shape=mesh_shape, units='m')
def setup_partials(self):
symmetry = self.options['symmetry']
        mesh_shape = self.options['mesh_shape']
nx, ny, _ = mesh_shape
        if symmetry:
            rows = np.array([])
            cols = np.array([])
            for i in range(nx):
                for k in range(int(ny - 1)):
                    # z coords of the points of the i-th chordwise edge affected by this panel
                    rows = np.concatenate((rows, 3 * np.arange(int(i * ny), int(i * ny + (ny - 1) - k)) + 2))
                    # index of the dihedral angle of the corresponding panel
                    cols = np.concatenate((cols, int((ny - 1) - (1 + k)) * np.ones(int((ny - 1) - k))))
self.declare_partials('mesh', 'dihedral_distrib', rows=rows, cols=cols)
nn = nx*ny*3
# the diagonal elements of the d_mesh/d_in_mesh are jacobian matrix are non zero.
rows = np.arange(nn)
cols = np.arange(nn)
for i in range(nx):
for j in range(ny):
if (j < (ny-1)):
for k in range(j,int(ny)):
rows = np.concatenate((rows, np.array([i*ny*3 + j*3+2])))
cols = np.concatenate((cols, np.array([k*3+1])))
self.declare_partials('mesh', 'in_mesh', rows=rows, cols=cols)
else:
rows = np.array([])
cols = np.array([])
for i in range(nx):
for k in range(int((ny-1)/2)):
                    # z coords of the points on the left half of the i-th chordwise edge
                    rows = np.concatenate((rows, 3 * np.arange(int(i * ny), int(i * ny + ((ny - 1) / 2) - k)) + 2))
                    # z coords of the points on the right half of the i-th chordwise edge
                    rows = np.concatenate((rows, 3 * np.arange(int(i * ny + ((ny + 1) / 2) + k), int((i + 1) * ny)) + 2))
                    # indices of the dihedral angles of the matching left and right panels
                    cols = np.concatenate((cols, int(((ny - 1) / 2) - (1 + k)) * np.ones(int(((ny - 1) / 2) - k))))
                    cols = np.concatenate((cols, int(((ny - 1) / 2) + k) * np.ones(int(((ny - 1) / 2) - k))))
self.declare_partials('mesh', 'dihedral_distrib', rows=rows, cols=cols)
nn = nx*ny*3
# the diagonal elements of the d_mesh/d_in_mesh are jacobian matrix are non zero.
rows = np.arange(nn)
cols = np.arange(nn)
for i in range(nx):
for j in range(ny):
if (j < (ny-1)/2):
for k in range(j,int((ny+1)/2)):
rows = np.concatenate((rows, np.array([i*ny*3 + j*3+2])))
cols = np.concatenate((cols, np.array([k*3+1])))
if (j > (ny-1)/2):
for k in range(int((ny-1)/2), j+1):
rows = np.concatenate((rows, np.array([i*ny*3 + j*3+2])))
cols = np.concatenate((cols, np.array([k*3+1])))
self.declare_partials('mesh', 'in_mesh', rows=rows, cols=cols)
def compute(self, inputs, outputs):
        # Note: any twist-induced z offsets already present in `in_mesh` are preserved;
        # the per-panel dihedral contribution computed below is simply added on top of them.
symmetry = self.options['symmetry']
dihedral_angle_distrib = inputs['dihedral_distrib']
mesh = inputs['in_mesh']
# Get the mesh parameters and desired sweep angle
nx, ny, _ = mesh.shape
p180 = np.pi / 180
dihedral_distrib_rad = p180 * dihedral_angle_distrib
if(symmetry):
# set the z coordinates of the points of the leading edge (no wing twist for now):
le_slope_z_distrib = np.tan(dihedral_distrib_rad) # leading edge slope (z versus y)(around x axis)
spanwise_dist = np.zeros(np.shape(le_slope_z_distrib))
for j in range(int(ny-1)):
spanwise_dist[j] = np.abs(mesh[0, j, 1] - mesh[0, j+1, 1])
spanwise_alt_dihedral_without_central_point = le_slope_z_distrib*spanwise_dist
for j in range(int(ny-1)):
spanwise_alt_dihedral_without_central_point[j] = np.sum(spanwise_alt_dihedral_without_central_point[j:int(ny-1)])
spanwise_alt_with_central_point = np.zeros(ny)
spanwise_alt_with_central_point[:int(ny-1)] += spanwise_alt_dihedral_without_central_point[:int(ny-1)]
else:
# set the z coordinates of the points of the leading edge (no wing twist for now):
le_slope_z_distrib = np.tan(dihedral_distrib_rad) # leading edge slope (z versus y)(around x axis)
spanwise_dist = np.zeros(len(le_slope_z_distrib))
for j in range(int(ny-1)):
spanwise_dist[j] = np.abs(mesh[0, j, 1] - mesh[0, j+1, 1])
spanwise_alt_dihedral_without_central_point = le_slope_z_distrib*spanwise_dist
for j in range(int((ny-1)/2)):
spanwise_alt_dihedral_without_central_point[j] = np.sum(spanwise_alt_dihedral_without_central_point[j:int((ny-1)/2)])
spanwise_alt_dihedral_without_central_point[-(j+1)] = np.sum(spanwise_alt_dihedral_without_central_point[::-1][j:int((ny-1)/2)])
spanwise_alt_with_central_point = np.zeros(ny)
spanwise_alt_with_central_point[:int((ny-1)/2)] += spanwise_alt_dihedral_without_central_point[:int((ny-1)/2)]
spanwise_alt_with_central_point[int((ny-1)/2)+1:] += spanwise_alt_dihedral_without_central_point[int((ny-1)/2):]
outputs['mesh'][:] = mesh
for j in range(nx):
outputs['mesh'][j, :, 2] += spanwise_alt_with_central_point
def compute_partials(self, inputs, partials):
symmetry = self.options['symmetry']
dihedral_angle_distrib = inputs['dihedral_distrib']
mesh = inputs['in_mesh']
nx, ny, _ = mesh.shape
p180 = np.pi / 180 # to pass in radians
tan_dihedral_distrib = np.tan(p180 * dihedral_angle_distrib)
dtan_ddihedral_angle_distrib = p180 / np.cos(p180*dihedral_angle_distrib)**2
mesh_y_diff = mesh[0,1:,1] - mesh[0,:-1,1]
temp = mesh_y_diff * dtan_ddihedral_angle_distrib
        if symmetry:
one_edge_ddihedral = np.array([])
for k in range(ny-1):
one_edge_ddihedral = np.concatenate((one_edge_ddihedral, temp[ny-1 - (1+k)]*np.ones(ny-1 - k)))
partials['mesh', 'dihedral_distrib'][:] = np.tile(one_edge_ddihedral, nx)
# print("partials['mesh', 'dihedral_distrib'] =",partials['mesh', 'dihedral_distrib'])
nn = nx * ny * 3
partials['mesh', 'in_mesh'][:nn] = np.ones(nn)
one_edge_din_mesh = np.array([])
for j in range(ny-1): # fill one_edge_din_mesh
v = np.zeros(ny-1 +1 -j)
for k in range(j, ny-1):
v[k - j] += -tan_dihedral_distrib[k]
v[k+1 -j] += tan_dihedral_distrib[k]
one_edge_din_mesh = np.concatenate((one_edge_din_mesh, v))
partials['mesh', 'in_mesh'][nn:] = np.tile(one_edge_din_mesh, nx)
# print("partials['mesh', 'in_mesh'] =",partials['mesh', 'in_mesh'])
else:
one_edge_ddihedral = np.array([])
for k in range(int((ny-1)/2)):
one_edge_ddihedral = np.concatenate((one_edge_ddihedral, temp[int((ny-1)/2) - (1+k)]*np.ones(int((ny-1)/2) - k)))
one_edge_ddihedral = np.concatenate((one_edge_ddihedral, temp[int((ny-1)/2) + k]*np.ones(int((ny-1)/2) - k)))
partials['mesh', 'dihedral_distrib'][:] = np.tile(one_edge_ddihedral, nx)
# print("partials['mesh', 'dihedral_distrib'] =",partials['mesh', 'dihedral_distrib'])
nn = nx * ny * 3
partials['mesh', 'in_mesh'][:nn] = np.ones(nn)
one_edge_din_mesh_left = np.array([])
one_edge_din_mesh_right = np.array([])
for j in range(int((ny-1)/2)): # fill the left part of one_edge_din_mesh
v_left = np.zeros(int((ny-1)/2) +1 -j)
for k in range(j, int((ny-1)/2)):
v_left[k - j] += -tan_dihedral_distrib[k]
v_left[k+1 -j] += tan_dihedral_distrib[k]
one_edge_din_mesh_left = np.concatenate((one_edge_din_mesh_left, v_left))
for j in range(int((ny-1)/2) +1, ny): # fill the right part of one_edge_din_mesh
v_right = np.zeros(j + 1 - int((ny-1)/2))
for k in range(int((ny-1)/2), j):
v_right[k - int((ny-1)/2)] += -tan_dihedral_distrib[k]
v_right[k+1 - int((ny-1)/2)] += tan_dihedral_distrib[k]
one_edge_din_mesh_right = np.concatenate((one_edge_din_mesh_right, v_right))
one_edge_din_mesh = np.array([])
one_edge_din_mesh = np.concatenate((one_edge_din_mesh_left, one_edge_din_mesh_right))
partials['mesh', 'in_mesh'][nn:] = np.tile(one_edge_din_mesh, nx)
# print("partials['mesh', 'in_mesh'] =",partials['mesh', 'in_mesh'])
| 38.028169 | 190 | 0.556706 |
e3b53ba682d8ca586ab871c7b7bffc32cb4b336a | 853 | py | Python | venv35/bin/rst2latex.py | green10-syntra-ab-python-adv/mastering-ch08 | a6b97ac37d24ff65764cae2f68bbb50a08249c84 | [
"BSD-2-Clause"
] | null | null | null | venv35/bin/rst2latex.py | green10-syntra-ab-python-adv/mastering-ch08 | a6b97ac37d24ff65764cae2f68bbb50a08249c84 | [
"BSD-2-Clause"
] | null | null | null | venv35/bin/rst2latex.py | green10-syntra-ab-python-adv/mastering-ch08 | a6b97ac37d24ff65764cae2f68bbb50a08249c84 | [
"BSD-2-Clause"
] | null | null | null | #!/data/Syntra-Sync/Syntra-AB/python-adv/4-code/mastering-ch08/venv35/bin/python
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description)
| 31.592593 | 80 | 0.679953 |
a17b0df890d1a399ee0b419d190f3243d1f845fd | 262 | py | Python | data/studio21_generated/interview/1892/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/interview/1892/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/interview/1892/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | # Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
    def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:
        # Collect all root-to-leaf paths whose values add up to `sum`
        # (body completed from the starter signature).
        if root is None:
            return []
        if root.left is None and root.right is None:
            return [[root.val]] if root.val == sum else []
        rest = (self.pathSum(root.left, sum - root.val) +
                self.pathSum(root.right, sum - root.val))
        return [[root.val] + path for path in rest]
| 29.111111 | 67 | 0.645038 |
28820ff2ce263348044aeecf5288b222763686b5 | 4,201 | py | Python | adage/analyze/management/commands/add_ml_model.py | greenelab/old-adage-server | c2178144f2cfa75b88656c84eb62f81c65eba46d | [
"BSD-3-Clause"
] | 9 | 2016-09-19T12:34:11.000Z | 2019-02-19T00:17:37.000Z | adage/analyze/management/commands/add_ml_model.py | greenelab/old-adage-server | c2178144f2cfa75b88656c84eb62f81c65eba46d | [
"BSD-3-Clause"
] | 347 | 2016-09-01T19:58:29.000Z | 2020-06-06T14:01:47.000Z | adage/analyze/management/commands/add_ml_model.py | greenelab/old-adage-server | c2178144f2cfa75b88656c84eb62f81c65eba46d | [
"BSD-3-Clause"
] | 11 | 2016-09-01T19:29:16.000Z | 2020-04-13T19:53:45.000Z | #!/usr/bin/env python
"""
This management command adds a new machine learning model record whose
name is ml_model_name into the database's ml_model table. It should be
invoked like this:
python manage.py add_ml_model <ml_model_name> <organism_tax_id> \
[--directed_edge] [--g2g_edge_cutoff <cutoff_value>] \
[--desc_html <desc_html>]
The two required arguments are:
(1) ml_model_name: machine learning model name;
(2) organism_tax_id: taxonomy ID of the organism of ml_model_name.
"--directed_edge" is an optional argument. If it is specified, the
edges in the gene-gene relationship table will be directed; otherwise
the edges in the gene-gene relationship table will be undirected.
"--g2g_edge_cutoff" is another optional argument. If it is specified,
the numeric value that follows will be the cutoff value of the edges in
gene-gene network; otherwise the edge cutoff value will be set to 0.
"--desc_html" is another optional argument that is the model description
in html format; the default is an empty string.
IMPORTANT:
Before running this command, please make sure that organism_tax_id
already exists in the database's "Organism" table, whose model is
bundled in "django-organisms" package. If organism_tax_id is not in the
database yet, you can use "organisms_create_or_update.py" management
command in that package to add it.
"""
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from organisms.models import Organism
from analyze.models import MLModel
class Command(BaseCommand):
help = "Add a new machine learning model to the database."
def add_arguments(self, parser):
parser.add_argument('ml_model_name', type=str)
parser.add_argument('organism_tax_id', type=int)
parser.add_argument('--directed_edge',
action='store_true',
dest='directed',
default=False,
help='Create directed gene-gene relationship '
'edges')
parser.add_argument('--g2g_edge_cutoff',
type=float,
dest='g2g_edge_cutoff',
default=0.0,
help='Gene-gene network edge cutoff value')
parser.add_argument('--desc_html',
type=str,
dest='desc_html',
default='',
help='Model description in HTML format')
def handle(self, **options):
try:
add_ml_model(options['ml_model_name'],
options['organism_tax_id'],
options['directed'],
options['g2g_edge_cutoff'],
options['desc_html'])
self.stdout.write(self.style.NOTICE(
"Added a new machine learning model successfully"))
except Exception as e:
raise CommandError(
"Failed to add a new machine learning model: add_ml_model "
"raised an exception:\n%s" % e)
def add_ml_model(ml_model_name, organism_tax_id, directed_edge, edge_cutoff,
desc_html):
# Raise an exception if ml_model_name on the command line is "" or
# " ".
if not ml_model_name or ml_model_name.isspace():
raise Exception("Input ml_model_name is blank")
# Raise an exception if organism_tax_id does not exist in Organism
# table.
try:
organism = Organism.objects.get(taxonomy_id=organism_tax_id)
except Organism.DoesNotExist:
raise Exception("Input organism_tax_id is not found in the database. "
"Please use the management command "
"'organism_create_or_update.py' in django-organisms "
"package to create this organism.")
MLModel.objects.create(title=ml_model_name,
organism=organism,
directed_g2g_edge=directed_edge,
g2g_edge_cutoff=edge_cutoff,
desc_html=desc_html)
| 41.594059 | 78 | 0.625803 |
ba911cb67016c401dcff1a66e81f8642319dbc86 | 1,979 | py | Python | examples/example.py | ykri021/CarND-Advanced-Lane-Lines | 45b517f137b8a9b9b725a14e69f3b2f92557bc7a | [
"MIT"
] | null | null | null | examples/example.py | ykri021/CarND-Advanced-Lane-Lines | 45b517f137b8a9b9b725a14e69f3b2f92557bc7a | [
"MIT"
] | null | null | null | examples/example.py | ykri021/CarND-Advanced-Lane-Lines | 45b517f137b8a9b9b725a14e69f3b2f92557bc7a | [
"MIT"
] | null | null | null |
# coding: utf-8
# ## Advanced Lane Finding Project
#
# The goals / steps of this project are the following:
#
# * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# * Apply a distortion correction to raw images.
# * Use color transforms, gradients, etc., to create a thresholded binary image.
# * Apply a perspective transform to rectify binary image ("birds-eye view").
# * Detect lane pixels and fit to find the lane boundary.
# * Determine the curvature of the lane and vehicle position with respect to center.
# * Warp the detected lane boundaries back onto the original image.
# * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
#
# ---
# ## First, I'll compute the camera calibration using chessboard images
# In[1]:
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'qt')
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('../camera_cal/calibration*.jpg')
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
cv2.imshow('img',img)
cv2.waitKey(500)
cv2.destroyAllWindows()
# ## And so on and so forth...
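# The natural next step (a sketch, not part of the original notebook): use the
# collected object/image points to calibrate the camera and undistort a test
# image. `calibration1.jpg` is an assumed file name from the same folder.

# In[2]:

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
    objpoints, imgpoints, gray.shape[::-1], None, None)

img = cv2.imread('../camera_cal/calibration1.jpg')
undistorted = cv2.undistort(img, mtx, dist, None, mtx)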
| 31.919355 | 113 | 0.704901 |
d9b980ea49090296a625458201b726662e25dc17 | 907 | py | Python | setup.py | jvivian/rnaseq-lib | 688ab84b73b44c2a3b6256ed390f1a54d13bfa8a | [
"MIT"
] | null | null | null | setup.py | jvivian/rnaseq-lib | 688ab84b73b44c2a3b6256ed390f1a54d13bfa8a | [
"MIT"
] | null | null | null | setup.py | jvivian/rnaseq-lib | 688ab84b73b44c2a3b6256ed390f1a54d13bfa8a | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(name='rnaseq-lib',
version='1.0a27',
description='Library of convenience functions related to current research',
url='http://github.com/jvivian/rnaseq-lib',
author='John Vivian',
author_email='jtvivian@gmail.com',
license='MIT',
package_dir={'': 'src'},
packages=find_packages('src'),
package_data={'rnaseq_lib': ['data/*']},
install_requires=['pandas',
'numpy',
'seaborn',
'holoviews',
'scipy',
'sklearn'],
extras_require={
'web': [
'requests',
'mygene',
'bs4',
'biopython',
            'synapseclient',
'xmltodict'],
'trimap': ['annoy',
'numba']}
)
| 30.233333 | 81 | 0.46086 |
19bd4a70e52e55769d33adfc9456a833c797eeb5 | 1,785 | py | Python | TP_ALGO_2/ALGOS.py | PierreLeGuen/ALGO_S5 | 9067e887d14fe997c6944292a0cff23ceda47b6e | [
"MIT"
] | null | null | null | TP_ALGO_2/ALGOS.py | PierreLeGuen/ALGO_S5 | 9067e887d14fe997c6944292a0cff23ceda47b6e | [
"MIT"
] | null | null | null | TP_ALGO_2/ALGOS.py | PierreLeGuen/ALGO_S5 | 9067e887d14fe997c6944292a0cff23ceda47b6e | [
"MIT"
] | null | null | null | #coding=UTF-8
import math
#EXO 1
def res_sec_deg(a,b,c):
delta=b**2-4*a*c
if delta>0:
x=(-b-math.sqrt(delta))/2*a
y=(-b+math.sqrt(delta))/2*a
print("Deux solutions :")
print(x,y)
elif delta==0:
x=-b/(2*a)
print("Une solution")
print(x)
else:
print("Pas de solution")
#a=5
#b=-70
#c=2
#res_sec_deg(a,b,c)
# EXERCISE 2
# Q1
def perimetre(rayon):
    # circumference of a circle of radius `rayon`
    p=2*math.pi*rayon
    #print("Perimeter: ")
    #print(p)
    return p
def surface(rayon):
    # area of a disc of radius `rayon`
    s=math.pi*rayon*rayon
    #print("Surface: ")
    #print(s)
    return s
#rayon=input()
#perimetre_surface(rayon)
def surface_volume(rayon,hauteur):
    print(perimetre(rayon)*hauteur)  # lateral surface area of the cylinder
    print(surface(rayon)*hauteur)    # volume of the cylinder
#surface_volume(5,10)
# EXERCISE 3
import random
def user_nb_rand():
    # the user guesses a number drawn by the computer
    chiffre_alea = random.randint(0,100)
    chiffre = 0
    boucle = True
    while boucle == True:
        print("Enter a number:")
        chiffre = int(input())
        if chiffre > chiffre_alea:
            print("The number is smaller")
        elif chiffre < chiffre_alea:
            print("The number is larger")
        else:
            print("Well done, number found")
            boucle = False
#user_nb_rand()
# EXERCISE 4
def ordi_nb_rand():
    # the computer finds the user's number by bisection
    print("Lower bound:")
    a = int(input())
    print("Upper bound:")
    b = int(input())
    input("Think of a number in this range, then press Enter")
    chiffre=(a+b)/2
    print(chiffre)
    boucle = True
    while boucle == True:
        #chiffre=(a+b)/2
        print("Is your number greater than "+str(chiffre)+"? [yes=1/no=0/found=2]")
        rep = int(input())
        if rep == 0:
            b=chiffre
            chiffre = (a+b)/2
        elif rep == 1:
            a=chiffre
            chiffre = (a+b)/2
        else:
            print("BRAVO")
            boucle = False
ordi_nb_rand()
| 21.506024 | 68 | 0.545658 |
7eeb7ce114b1338af1266862eddbfe42b4f31f30 | 1,929 | py | Python | config.py | ZhiShiMao/pity | c9bcb9c3884b006a29af86867acbcec652f722bd | [
"Apache-2.0"
] | null | null | null | config.py | ZhiShiMao/pity | c9bcb9c3884b006a29af86867acbcec652f722bd | [
"Apache-2.0"
] | null | null | null | config.py | ZhiShiMao/pity | c9bcb9c3884b006a29af86867acbcec652f722bd | [
"Apache-2.0"
] | null | null | null | # 基础配置类
import os
class Config(object):
ROOT = os.path.dirname(os.path.abspath(__file__))
LOG_NAME = os.path.join(ROOT, 'logs', 'pity.log')
    # JSON_AS_ASCII = False  # Flask jsonify encoding issue
    # MySQL connection settings
MYSQL_HOST = "121.5.2.74"
MYSQL_PORT = 3306
MYSQL_USER = "root"
MYSQL_PWD = "wuranxu@33"
DBNAME = "pity"
REDIS_HOST = "121.5.2.74"
REDIS_PORT = 7788
REDIS_DB = 0
REDIS_PASSWORD = "woodywu"
    # Redis connection settings
REDIS_NODES = [{"host": REDIS_HOST, "port": REDIS_PORT, "db": REDIS_DB, "password": REDIS_PASSWORD}]
# sqlalchemy
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(
MYSQL_USER, MYSQL_PWD, MYSQL_HOST, MYSQL_PORT, DBNAME)
    # Async database URI
ASYNC_SQLALCHEMY_URI = f'mysql+aiomysql://{MYSQL_USER}:{MYSQL_PWD}@{MYSQL_HOST}:{MYSQL_PORT}/{DBNAME}'
SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Permissions: 0 = regular user, 1 = group leader, 2 = admin
MEMBER = 0
MANAGER = 1
ADMIN = 2
    # GitHub access_token endpoint
GITHUB_ACCESS = "https://github.com/login/oauth/access_token"
    # GitHub endpoint for fetching user info
GITHUB_USER = "https://api.github.com/user"
# client_id
CLIENT_ID = "0f4fc0a875de30614a6a"
# CLIENT_ID = "c46c7ae33442d13498cd"
# SECRET
SECRET_KEY = "a13c22377318291d5932bc5b62c1885b344355a0"
# SECRET_KEY = "c79fafe58ff45f6b5b51ddde70d2d645209e38b9"
    # Test report path
REPORT_PATH = os.path.join(ROOT, "templates", "report.html")
    # App directory path
APP_PATH = os.path.join(ROOT, "app")
    # DAO directory path
DAO_PATH = os.path.join(APP_PATH, 'crud')
SERVER_REPORT = "http://test.pity.fun/#/record/report/"
ALIYUN = "aliyun"
GITEE = "gitee"
# 请求类型
class BodyType:
none = 0
json = 1
form = 2
x_form = 3
binary = 4
graphQL = 5
# Pre-condition (constructor) types
class ConstructorType:
testcase = 0
sql = 1
redis = 2
py_script = 3
http = 4
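# Usage sketch (illustrative): other modules compare against these codes, e.g.
#
#     if body_type == BodyType.json:
#         ...  # parse the request body as JSON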
| 23.240964 | 106 | 0.617937 |
ff2af6fcea283879bce5b864c22ac229f3239b18 | 1,050 | py | Python | var/spack/repos/builtin/packages/nlohmann-json-schema-validator/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/nlohmann-json-schema-validator/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/nlohmann-json-schema-validator/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class NlohmannJsonSchemaValidator(CMakePackage):
"""JSON schema validator for JSON for Modern C++"""
homepage = "https://github.com/pboettch/json-schema-validator"
url = "https://github.com/pboettch/json-schema-validator/archive/2.1.0.tar.gz"
git = "https://github.com/pboettch/json-schema-validator.git"
version('master', branch='master')
version('2.1.0', sha256='83f61d8112f485e0d3f1e72d51610ba3924b179926a8376aef3c038770faf202')
version('2.0.0', sha256='ca8e4ca5a88c49ea52b5f5c2a08a293dbf02b2fc66cb8c09d4cce5810ee98b57')
version('1.0.0', sha256='4bdcbf6ce98eda993d8a928dbe97a03f46643395cb872af875a908156596cc4b')
depends_on('cmake@3.2:', type='build')
depends_on('nlohmann-json')
def cmake_args(self):
args = ['-DBUILD_SHARED_LIBS:BOOL=ON']
return args
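    # Example command-line installation (sketch):
    #
    #     spack install nlohmann-json-schema-validator@2.1.0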
| 38.888889 | 95 | 0.735238 |
1ff940ed383e2b6d005f64ff379d151469133126 | 1,683 | py | Python | TikTokAPI/utils.py | xclnky/TikTokAPI-Python | 4cf03a3594da363ccc8c4925b4799258f626546e | [
"MIT"
] | null | null | null | TikTokAPI/utils.py | xclnky/TikTokAPI-Python | 4cf03a3594da363ccc8c4925b4799258f626546e | [
"MIT"
] | null | null | null | TikTokAPI/utils.py | xclnky/TikTokAPI-Python | 4cf03a3594da363ccc8c4925b4799258f626546e | [
"MIT"
] | null | null | null | import random
import string
import requests
import json
def random_key(length):
key = ''
for i in range(length):
key += random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits)
return key
def build_get_url(base_url, params, append=False):
final_url = base_url
if append:
final_url += "&"
else:
final_url += "?"
for key, val in params.items():
final_url += key + "=" + val + "&"
final_url = final_url[:-1]
return final_url
def get_req_json(url, params=None, headers=None, proxy_endpoint=None):
proxies = {'http': 'http://{}'.format(proxy_endpoint), 'https': 'https://{}'.format(proxy_endpoint)} if proxy_endpoint else None
headers["Host"] = url.split("/")[2]
r = requests.get(url, params=params, headers=headers, proxies=proxies)
return json.loads(r.text)
def get_req_content(url, params=None, headers=None, proxy_endpoint=None):
proxies = {'http': 'http://{}'.format(proxy_endpoint), 'https': 'https://{}'.format(proxy_endpoint)} if proxy_endpoint else None
headers["Host"] = url.split("/")[2]
r = requests.get(url, params=params, headers=headers, proxies=proxies)
return r.content
def get_req_text(url, params=None, headers=None, proxy_endpoint=None):
proxies = {'http': 'http://{}'.format(proxy_endpoint), 'https': 'https://{}'.format(proxy_endpoint)} if proxy_endpoint else None
headers["Host"] = url.split("/")[2]
r = requests.get(url, params=params, headers=headers, proxies=proxies)
return r.text
def python_list2_web_list(data):
web_list = "[\""
web_list += '", "'.join(data)
web_list += "\"]"
return web_list
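# Minimal usage sketch (values are illustrative):
#
#     params = {"count": "30", "region": "US"}
#     build_get_url("https://example.com/api/feed/", params)
#     # -> "https://example.com/api/feed/?count=30&region=US"
#     python_list2_web_list(["a", "b"])  # -> '["a", "b"]'
#     random_key(16)                     # 16-character alphanumeric key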
| 32.365385 | 132 | 0.658942 |
c9e361c6d3982de0b5717ca1c6eff831a0e2d653 | 11,833 | py | Python | document.py | yvesscherrer/stanzatagger | 89ce8de48f4775323bab810411442f00729ba73b | [
"Apache-2.0"
] | null | null | null | document.py | yvesscherrer/stanzatagger | 89ce8de48f4775323bab810411442f00729ba73b | [
"Apache-2.0"
] | null | null | null | document.py | yvesscherrer/stanzatagger | 89ce8de48f4775323bab810411442f00729ba73b | [
"Apache-2.0"
] | 1 | 2022-03-25T10:08:20.000Z | 2022-03-25T10:08:20.000Z | # Reimplementation of stanza.models.common.doc and stanza.utils.conll that allows for more flexibility with not-quite-CoNLL-conform datasets
import logging
import io
import random
from evaluator import Evaluator, POS_KEY
logger = logging.getLogger('stanza')
class Document(object):
def __init__(self, read_positions={"id": 0, "cform": 1, "wform": 1, "pos": 3, "feats": 5}, write_positions={"id": 0, "cform": 1, "wform": 1, "pos": 3, "feats": 5}, from_file=None, from_string=None, copy_untouched=True, sample_ratio=1.0, cut_first=-1):
self.read_positions = {x: read_positions[x] for x in read_positions if read_positions[x] >= 0}
self.write_positions = {x: write_positions[x] for x in write_positions if write_positions[x] >= 0}
self.ignore_comments = ("id" in read_positions and read_positions["id"] == 0)
self.copy_untouched = copy_untouched
self.sentences = []
if isinstance(from_file, list):
for f in from_file:
self.load_from_file(f, sample_ratio, cut_first)
else:
self.load_from_file(from_file, sample_ratio, cut_first)
if from_string:
self.load_from_string(from_string)
def __len__(self):
return len(self.sentences)
def __iter__(self):
return iter(self.sentences)
def _load(self, f, sample_ratio, cut_first):
new_sentences = []
sent = Sentence()
for line in f:
if len(line.strip()) == 0:
if len(sent) > 0:
new_sentences.append(sent)
sent = Sentence()
else:
if self.ignore_comments and line.startswith('#'):
continue
array = line.split('\t')
array[-1] = array[-1].strip()
sent.add_token(array)
if len(sent) > 0:
new_sentences.append(sent)
if cut_first > 0 and len(new_sentences) > cut_first:
new_sentences = new_sentences[:cut_first]
logger.info("Reduce dataset to first {} instances".format(cut_first))
if sample_ratio < 1.0:
keep = int(sample_ratio * len(new_sentences))
new_sentences = random.sample(new_sentences, keep)
logger.info("Subsample dataset with rate {:g}".format(sample_ratio))
self.sentences.extend(new_sentences)
return len(new_sentences)
def load_from_file(self, filename, sample_ratio=1.0, cut_first=-1):
new_sents = self._load(open(filename), sample_ratio, cut_first)
logger.info("{} sentences loaded from file {}".format(new_sents, filename))
def load_from_string(self, s, sample_ratio=1.0, cut_first=-1):
new_sents = self._load(io.StringIO(s), sample_ratio, cut_first)
logger.info("{} sentences loaded from string".format(new_sents))
def _write(self, f, pred=True):
for sent in self.sentences:
for token in sent:
if self.copy_untouched:
array = ["_" for _ in range(max(max(self.write_positions.values())+1, len(token.given)))]
else:
array = ["_" for _ in range(max(self.write_positions.values())+1)]
for key, pos in self.write_positions.items():
if pred and key in token.pred:
if key == 'unk':
if token.pred[key]:
array[pos] = "OOV"
else:
array[pos] = token.pred[key]
elif key in self.read_positions:
array[pos] = token.given[self.read_positions[key]]
if self.copy_untouched:
for pos in range(len(array)):
if pos not in self.write_positions.values():
array[pos] = token.given[pos]
f.write("\t".join(array) + "\n")
f.write("\n")
def write_to_file(self, filename, pred=True):
self._write(open(filename, 'w'), pred=pred)
logger.info("Predictions written to file {}".format(filename))
def write_to_string(self, pred=True):
s = io.StringIO()
self._write(s, pred=pred)
return s
def provide_data(self):
doc_array = []
for sent in self.sentences:
sent_array = []
for token in sent:
if "id" in self.read_positions and ("." in token.given[self.read_positions["id"]] or "-" in token.given[self.read_positions["id"]]):
continue
token_array = []
for key in ("cform", "wform", "pos", "feats"):
if key in self.read_positions:
token_array.append(token.given[self.read_positions[key]])
else:
token_array.append("_")
sent_array.append(token_array)
doc_array.append(sent_array)
return doc_array
def add_predictions(self, doc_array):
assert(len(doc_array) == len(self.sentences))
for sent_array, sent in zip(doc_array, self.sentences):
ti = 0
for token in sent:
if "id" in self.read_positions and ("." in token.given[self.read_positions["id"]] or "-" in token.given[self.read_positions["id"]]):
continue
token.pred = {"pos": sent_array[ti][0], "feats": sent_array[ti][1], "unk": sent_array[ti][2]}
ti += 1
assert(ti == len(sent_array))
def evaluate(self):
feats_evaluator = Evaluator(mode="by_feats")
feats_oov_evaluator = Evaluator(mode="by_feats")
exact_evaluator = Evaluator(mode="exact", only_univ=True)
if "pos" not in self.read_positions and "feats" not in self.read_positions:
logger.info("Cannot evaluate predictions because gold annotations are not available")
return feats_evaluator, feats_oov_evaluator, exact_evaluator
for sent in self.sentences:
for token in sent:
pred_feats = {}
gold_feats = {}
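                # UD-style feats strings look like "Case=Nom|Number=Sing" and
                # are parsed below into {"Case": "Nom", "Number": "Sing"}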
if "feats" in token.pred and "feats" in self.read_positions:
if token.pred["feats"] not in ("_", ""):
pred_feats = dict([x.split("=", 1) for x in token.pred["feats"].split("|")])
if token.given[self.read_positions["feats"]] not in ("_", ""):
gold_feats = dict([x.split("=", 1) for x in token.given[self.read_positions["feats"]].split("|")])
exact_evaluator.add_instance(gold_feats, pred_feats) # do not add POS here
if "pos" in token.pred and "pos" in self.read_positions:
pred_feats.update({POS_KEY: token.pred["pos"]})
gold_feats.update({POS_KEY: token.given[self.read_positions["pos"]]})
feats_evaluator.add_instance(gold_feats, pred_feats)
if "unk" in token.pred and token.pred["unk"]:
feats_oov_evaluator.add_instance(gold_feats, pred_feats)
return feats_evaluator, feats_oov_evaluator, exact_evaluator
def get_augment_ratio(self, should_augment_predicate, can_augment_predicate, desired_ratio=0.1, max_ratio=0.5):
"""
Returns X so that if you randomly select X * N sentences, you get 10%
The ratio will be chosen in the assumption that the final dataset
is of size N rather than N + X * N.
should_augment_predicate: returns True if the sentence has some
feature which we may want to change occasionally. for example,
depparse sentences which end in punct
can_augment_predicate: in the depparse sentences example, it is
technically possible for the punct at the end to be the parent
of some other word in the sentence. in that case, the sentence
should not be chosen. should be at least as restrictive as
should_augment_predicate
"""
n_data = len(self.sentences)
n_should_augment = sum(should_augment_predicate(sentence) for sentence in self.sentences)
n_can_augment = sum(can_augment_predicate(sentence) for sentence in self.sentences)
n_error = sum(can_augment_predicate(sentence) and not should_augment_predicate(sentence)
for sentence in self.sentences)
if n_error > 0:
raise AssertionError("can_augment_predicate allowed sentences not allowed by should_augment_predicate")
if n_can_augment == 0:
logger.warning("Found no sentences which matched can_augment_predicate {}".format(can_augment_predicate))
return 0.0
n_needed = n_data * desired_ratio - (n_data - n_should_augment)
# if we want 10%, for example, and more than 10% already matches, we can skip
if n_needed < 0:
return 0.0
ratio = n_needed / n_can_augment
if ratio > max_ratio:
return max_ratio
return ratio
def augment_punct(self, augment_ratio=None, punct_tag='PUNCT'):
"""
Adds extra training data to compensate for some models having all sentences end with PUNCT
Some of the models (for example, UD_Hebrew-HTB) have the flaw that
all of the training sentences end with PUNCT. The model therefore
learns to finish every sentence with punctuation, even if it is
given a sentence with non-punct at the end.
One simple way to fix this is to train on some fraction of training data with punct.
        Params:
            augment_ratio: the fraction to augment. If None, a best guess is
                made to reach roughly 10% augmented data
            punct_tag: the POS tag that marks punctuation tokens
TODO: do this dynamically, as part of the DataLoader or elsewhere?
One complication is the data comes back from the DataLoader as
tensors & indices, so it is much more complicated to manipulate
"""
if len(self.sentences) == 0:
return []
can_augment_nopunct = lambda x: x.tokens[-1].given[self.read_positions['pos']] == punct_tag
should_augment_nopunct = lambda x: x.tokens[-1].given[self.read_positions['pos']] == punct_tag
if augment_ratio is None:
augment_ratio = self.get_augment_ratio(should_augment_nopunct, can_augment_nopunct)
logger.info("Determined augmentation ratio {:.2f}".format(augment_ratio))
if augment_ratio <= 0:
logger.info("Skipping data augmentation")
return
new_data = []
for sentence in self.sentences:
if can_augment_nopunct(sentence):
if random.random() < augment_ratio and len(sentence) > 1:
# todo: could deep copy the words
# or not deep copy any of this
new_sentence = sentence.copy(remove_last=True)
new_data.append(new_sentence)
self.sentences.extend(new_data)
logger.info("{} sentences available after augmentation".format(len(self.sentences)))
class Sentence(object):
def __init__(self):
self.tokens = []
def __len__(self):
return len(self.tokens)
def __iter__(self):
return iter(self.tokens)
def add_token(self, token):
t = Token(token)
self.tokens.append(t)
def copy(self, remove_last=False):
new = Sentence()
if remove_last:
for t in self.tokens[:-1]:
new.add_token(t.given)
else:
for t in self.tokens:
new.add_token(t.given)
return new
class Token(object):
def __init__(self, token):
self.given = token
self.pred = {}
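# Minimal end-to-end usage sketch (file names and the `tag` callable are
# illustrative, not part of this module):
#
#     doc = Document(from_file="train.conllu")
#     batch = doc.provide_data()       # [[[cform, wform, pos, feats], ...], ...]
#     preds = tag(batch)               # same nesting, (pos, feats, unk) per token
#     doc.add_predictions(preds)
#     doc.write_to_file("predicted.conllu")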
| 43.825926 | 255 | 0.598749 |
08b3b19eabd231b6e9a6bda8cf5c2d75fd2f750a | 18,258 | py | Python | pytorch_tabular/models/mixture_density/config.py | Vickyilango/pytorch_tabular | 10dbb1203516a83cb0d8e804806cc70ed8453128 | [
"MIT"
] | 560 | 2020-12-31T15:31:56.000Z | 2022-03-30T20:29:06.000Z | pytorch_tabular/models/mixture_density/config.py | yu45020/pytorch_tabular | cb29e735efa83c83b0cf98388f63b13a5f8f77f7 | [
"MIT"
] | 60 | 2021-02-04T05:54:33.000Z | 2022-03-31T07:40:34.000Z | pytorch_tabular/models/mixture_density/config.py | yu45020/pytorch_tabular | cb29e735efa83c83b0cf98388f63b13a5f8f77f7 | [
"MIT"
] | 63 | 2021-01-28T16:12:22.000Z | 2022-03-31T07:31:41.000Z | # Pytorch Tabular
# Author: Manu Joseph <manujoseph@gmail.com>
# For license information, see LICENSE.TXT
"""Mixture Density Head Config"""
from dataclasses import MISSING, dataclass, field
from typing import List, Optional
from pytorch_tabular.models.autoint import AutoIntConfig
from pytorch_tabular.models.category_embedding import CategoryEmbeddingModelConfig
from pytorch_tabular.models.node import NodeConfig
@dataclass
class MixtureDensityHeadConfig:
"""MixtureDensityHead configuration
Args:
num_gaussian (int): Number of Gaussian Distributions in the mixture model. Defaults to 1
n_samples (int): Number of samples to draw from the posterior to get prediction. Defaults to 100
central_tendency (str): Which measure to use to get the point prediction.
Choices are 'mean', 'median'. Defaults to `mean`
sigma_bias_flag (bool): Whether to have a bias term in the sigma layer. Defaults to False
mu_bias_init (Optional[List]): To initialize the bias parameter of the mu layer to predefined cluster centers.
Should be a list with the same length as number of gaussians in the mixture model.
It is highly recommended to set the parameter to combat mode collapse. Defaults to None
weight_regularization (Optional[int]): Whether to apply L1 or L2 Norm to the MDN layers.
It is highly recommended to use this to avoid mode collapse. Choices are [1,2]. Defaults to L2
lambda_sigma (Optional[float]): The regularization constant for weight regularization of sigma layer. Defaults to 0.1
lambda_pi (Optional[float]): The regularization constant for weight regularization of pi layer. Defaults to 0.1
lambda_mu (Optional[float]): The regularization constant for weight regularization of mu layer. Defaults to 0.1
softmax_temperature (Optional[float]): The temperature to be used in the gumbel softmax of the mixing coefficients.
Values less than one leads to sharper transition between the multiple components. Defaults to 1
speedup_training (bool): Turning on this parameter does away with sampling during training which speeds up training,
but also doesn't give you visibility on train metrics. Defaults to False
log_debug_plot (bool): Turning on this parameter plots histograms of the mu, sigma, and pi layers in addition to the logits
            (if log_logits is turned on in experiment config). Defaults to False
"""
num_gaussian: int = field(
default=1,
metadata={
"help": "Number of Gaussian Distributions in the mixture model. Defaults to 1",
},
)
sigma_bias_flag: bool = field(
default=False,
metadata={
"help": "Whether to have a bias term in the sigma layer. Defaults to False",
},
)
mu_bias_init: Optional[List] = field(
default=None,
metadata={
"help": "To initialize the bias parameter of the mu layer to predefined cluster centers. Should be a list with the same length as number of gaussians in the mixture model. It is highly recommended to set the parameter to combat mode collapse. Defaults to None",
},
)
weight_regularization: Optional[int] = field(
default=2,
metadata={
"help": "Whether to apply L1 or L2 Norm to the MDN layers. Defaults to L2",
"choices": [1, 2],
},
)
lambda_sigma: Optional[float] = field(
default=0.1,
metadata={
"help": "The regularization constant for weight regularization of sigma layer. Defaults to 0.1",
},
)
lambda_pi: Optional[float] = field(
default=0.1,
metadata={
"help": "The regularization constant for weight regularization of pi layer. Defaults to 0.1",
},
)
lambda_mu: Optional[float] = field(
default=0,
metadata={
"help": "The regularization constant for weight regularization of mu layer. Defaults to 0",
},
)
softmax_temperature: Optional[float] = field(
default=1,
metadata={
"help": "The temperature to be used in the gumbel softmax of the mixing coefficients. Values less than one leads to sharper transition between the multiple components. Defaults to 1",
},
)
n_samples: int = field(
default=100,
metadata={
"help": "Number of samples to draw from the posterior to get prediction. Defaults to 100",
},
)
central_tendency: str = field(
default="mean",
metadata={
"help": "Which measure to use to get the point prediction. Defaults to mean",
"choices": ["mean", "median"],
},
)
speedup_training: bool = field(
default=False,
metadata={
"help": "Turning on this parameter does away with sampling during training which speeds up training, but also doesn't give you visibility on train metrics. Defaults to False",
},
)
log_debug_plot: bool = field(
default=False,
metadata={
"help": "Turning on this parameter plots histograms of the mu, sigma, and pi layers in addition to the logits(if log_logits is turned on in experment config). Defaults to False",
},
)
_module_src: str = field(default="mixture_density")
_model_name: str = field(default="MixtureDensityHead")
_config_name: str = field(default="MixtureDensityHeadConfig")
@dataclass
class CategoryEmbeddingMDNConfig(CategoryEmbeddingModelConfig):
"""CategoryEmbeddingMDN configuration
Args:
task (str): Specify whether the problem is regression of classification.Choices are: regression classification
learning_rate (float): The learning rate of the model
loss (Union[str, NoneType]): The loss function to be applied.
By Default it is MSELoss for regression and CrossEntropyLoss for classification.
Unless you are sure what you are doing, leave it at MSELoss or L1Loss for regression and CrossEntropyLoss for classification
metrics (Union[List[str], NoneType]): the list of metrics you need to track during training.
The metrics should be one of the metrics implemented in PyTorch Lightning.
By default, it is Accuracy if classification and MeanSquaredLogError for regression
metrics_params (Union[List, NoneType]): The parameters to be passed to the Metrics initialized
target_range (Union[List, NoneType]): The range in which we should limit the output variable. Currently ignored for multi-target regression
Typically used for Regression problems. If left empty, will not apply any restrictions
layers (str): Hyphen-separated number of layers and units in the classification head. eg. 32-64-32.
        batch_norm_continuous_input (bool): If True, we will normalize the continuous layer by passing it through a BatchNorm layer
activation (str): The activation type in the classification head.
The default activation in PyTorch like ReLU, TanH, LeakyReLU, etc.
https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity
embedding_dims (Union[List[int], NoneType]): The dimensions of the embedding for each categorical column
as a list of tuples (cardinality, embedding_dim). If left empty, will infer using the cardinality of the categorical column
using the rule min(50, (x + 1) // 2)
embedding_dropout (float): probability of an embedding element to be zeroed.
        dropout (float): probability of a classification element to be zeroed.
use_batch_norm (bool): Flag to include a BatchNorm layer after each Linear Layer+DropOut
initialization (str): Initialization scheme for the linear layers. Choices are: `kaiming` `xavier` `random`
mdn_config (MixtureDensityHeadConfig): The config for defining the Mixed Density Network Head
Raises:
NotImplementedError: Raises an error if task is not in ['regression','classification']
"""
mdn_config: MixtureDensityHeadConfig = field(
default=None,
metadata={"help": "The config for defining the Mixed Density Network Head"},
)
_module_src: str = field(default="mixture_density")
_model_name: str = field(default="CategoryEmbeddingMDN")
_config_name: str = field(default="CategoryEmbeddingMDNConfig")
_probabilistic: bool = field(default=True)
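# Minimal configuration sketch (field values are illustrative):
#
#     mdn_head = MixtureDensityHeadConfig(num_gaussian=3, weight_regularization=2)
#     model_config = CategoryEmbeddingMDNConfig(task="regression",
#                                               mdn_config=mdn_head)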
@dataclass
class NODEMDNConfig(NodeConfig):
"""NODEMDN configuration
Args:
task (str): Specify whether the problem is regression of classification.Choices are: regression classification
learning_rate (float): The learning rate of the model
loss (Union[str, NoneType]): The loss function to be applied.
By Default it is MSELoss for regression and CrossEntropyLoss for classification.
Unless you are sure what you are doing, leave it at MSELoss or L1Loss for regression and CrossEntropyLoss for classification
metrics (Union[List[str], NoneType]): the list of metrics you need to track during training.
The metrics should be one of the metrics implemented in PyTorch Lightning.
By default, it is Accuracy if classification and MeanSquaredLogError for regression
metrics_params (Union[List, NoneType]): The parameters to be passed to the Metrics initialized
target_range (Union[List, NoneType]): The range in which we should limit the output variable. Currently ignored for multi-target regression
Typically used for Regression problems. If left empty, will not apply any restrictions
num_layers (int): Number of Oblivious Decision Tree Layers in the Dense Architecture
num_trees (int): Number of Oblivious Decision Trees in each layer
        additional_tree_output_dim (int): The additional output dimensions which are only used to
            pass through different layers of the architecture. Only the first `output_dim` outputs will be used for prediction
depth (int): The depth of the individual Oblivious Decision Trees
choice_function (str): Generates a sparse probability distribution to be used as feature weights(aka, soft feature selection)
Choices are: ['entmax15', 'sparsemax']
bin_function (str): Generates a sparse probability distribution to be used as tree leaf weights
Choices are: ['entmoid15', 'sparsemoid']
max_features (Union[int, NoneType]): If not None, sets a max limit on the number of features to be carried forward from layer to layer in the Dense Architecture
input_dropout (float): Dropout to be applied to the inputs between layers of the Dense Architecture
initialize_response (str): Initializing the response variable in the Oblivious Decision Trees.
By default, it is a standard normal distribution. Choices are: ['normal', 'uniform']
initialize_selection_logits (str): Initializing the feature selector.
By default is a uniform distribution across the features. Choices are: ['uniform', 'normal']
threshold_init_beta (float):
Used in the Data-aware initialization of thresholds where the threshold is initialized randomly
(with a beta distribution) to feature values in the first batch.
It initializes threshold to a q-th quantile of data points.
where q ~ Beta(:threshold_init_beta:, :threshold_init_beta:)
If this param is set to 1, initial thresholds will have the same distribution as data points
If greater than 1 (e.g. 10), thresholds will be closer to median data value
If less than 1 (e.g. 0.1), thresholds will approach min/max data values.
threshold_init_cutoff (float):
Used in the Data-aware initialization of scales(used in the scaling ODTs).
It is initialized in such a way that all the samples in the first batch belong to the linear
region of the entmoid/sparsemoid(bin-selectors) and thereby have non-zero gradients
Threshold log-temperatures initializer, in (0, inf)
By default(1.0), log-temperatures are initialized in such a way that all bin selectors
end up in the linear region of sparse-sigmoid. The temperatures are then scaled by this parameter.
Setting this value > 1.0 will result in some margin between data points and sparse-sigmoid cutoff value
Setting this value < 1.0 will cause (1 - value) part of data points to end up in flat sparse-sigmoid region
For instance, threshold_init_cutoff = 0.9 will set 10% points equal to 0.0 or 1.0
All points will be between (0.5 - 0.5 / threshold_init_cutoff) and (0.5 + 0.5 / threshold_init_cutoff)
embed_categorical (bool): Flag to embed categorical columns using an Embedding Layer.
If turned off, the categorical columns are encoded using LeaveOneOutEncoder
embedding_dims (Union[List[int], NoneType]): The dimensions of the embedding for each categorical column as a
list of tuples (cardinality, embedding_dim). If left empty, will infer using the cardinality of the categorical column
using the rule min(50, (x + 1) // 2)
embedding_dropout (float): probability of an embedding element to be zeroed.
mdn_config (MixtureDensityHeadConfig): The config for defining the Mixed Density Network Head
Raises:
NotImplementedError: Raises an error if task is not in ['regression','classification']
"""
mdn_config: MixtureDensityHeadConfig = field(
default=None,
metadata={"help": "The config for defining the Mixed Density Network Head"},
)
_module_src: str = field(default="mixture_density")
_model_name: str = field(default="NODEMDN")
_config_name: str = field(default="NODEMDNConfig")
_probabilistic: bool = field(default=True)
@dataclass
class AutoIntMDNConfig(AutoIntConfig):
"""AutomaticFeatureInteraction configuration
Args:
task (str): Specify whether the problem is regression of classification.Choices are: regression classification
learning_rate (float): The learning rate of the model
loss (Union[str, NoneType]): The loss function to be applied.
By Default it is MSELoss for regression and CrossEntropyLoss for classification.
Unless you are sure what you are doing, leave it at MSELoss or L1Loss for regression and CrossEntropyLoss for classification
metrics (Union[List[str], NoneType]): the list of metrics you need to track during training.
The metrics should be one of the metrics implemented in PyTorch Lightning.
By default, it is Accuracy if classification and MeanSquaredLogError for regression
metrics_params (Union[List, NoneType]): The parameters to be passed to the Metrics initialized
target_range (Union[List, NoneType]): The range in which we should limit the output variable. Currently ignored for multi-target regression
Typically used for Regression problems. If left empty, will not apply any restrictions
attn_embed_dim (int): The number of hidden units in the Multi-Headed Attention layers. Defaults to 32
num_heads (int): The number of heads in the Multi-Headed Attention layer. Defaults to 2
num_attn_blocks (int): The number of layers of stacked Multi-Headed Attention layers. Defaults to 2
attn_dropouts (float): Dropout between layers of Multi-Headed Attention Layers. Defaults to 0.0
        has_residuals (bool): Flag to have a residual connection from embedded output to attention layer output.
Defaults to True
embedding_dim (int): The dimensions of the embedding for continuous and categorical columns. Defaults to 16
embedding_dropout (float): probability of an embedding element to be zeroed. Defaults to 0.0
deep_layers (bool): Flag to enable a deep MLP layer before the Multi-Headed Attention layer. Defaults to False
layers (str): Hyphen-separated number of layers and units in the deep MLP. Defaults to 128-64-32
        activation (str): The activation type in the deep MLP. The default activation in PyTorch like
ReLU, TanH, LeakyReLU, etc. https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity.
Defaults to ReLU
        dropout (float): probability of a classification element to be zeroed in the deep MLP. Defaults to 0.0
use_batch_norm (bool): Flag to include a BatchNorm layer after each Linear Layer+DropOut. Defaults to False
        batch_norm_continuous_input (bool): If True, we will normalize the continuous layer by passing it through a BatchNorm layer
initialization (str): Initialization scheme for the linear layers. Defaults to `kaiming`.
            Choices are: [`kaiming`,`xavier`,`random`].
        mdn_config (MixtureDensityHeadConfig): The config for defining the Mixed Density Network Head
Raises:
NotImplementedError: Raises an error if task is not in ['regression','classification']
"""
mdn_config: MixtureDensityHeadConfig = field(
default=None,
metadata={"help": "The config for defining the Mixed Density Network Head"},
)
_module_src: str = field(default="mixture_density")
_model_name: str = field(default="AutoIntMDN")
_config_name: str = field(default="AutoIntMDNConfig")
_probabilistic: bool = field(default=True)
# cls = CategoryEmbeddingModelConfig
# desc = "Configuration for Data."
# doc_str = f"{desc}\nArgs:"
# for key in cls.__dataclass_fields__.keys():
# atr = cls.__dataclass_fields__[key]
# if atr.init:
# type = str(atr.type).replace("<class '","").replace("'>","").replace("typing.","")
# help_str = atr.metadata.get("help","")
# if "choices" in atr.metadata.keys():
# help_str += f'Choices are: {" ".join([str(ch) for ch in atr.metadata["choices"]])}'
# doc_str+=f'\n\t\t{key} ({type}): {help_str}'
# print(doc_str)
| 61.063545 | 273 | 0.701555 |
5a37c825c112e266ed404e6348f3b89202fb7ad2 | 8,445 | py | Python | spyder/plugins/shortcuts/plugin.py | sphh/spyder | bbd9b1fb90470f85e599285449d1f18cc2d0e6e6 | [
"MIT"
] | null | null | null | spyder/plugins/shortcuts/plugin.py | sphh/spyder | bbd9b1fb90470f85e599285449d1f18cc2d0e6e6 | [
"MIT"
] | null | null | null | spyder/plugins/shortcuts/plugin.py | sphh/spyder | bbd9b1fb90470f85e599285449d1f18cc2d0e6e6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009- Spyder Project Contributors
#
# Distributed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Shortcuts Plugin.
"""
# Standard library imports
import configparser
import sys
# Third party imports
from qtpy.QtCore import Qt, Signal
from qtpy.QtGui import QKeySequence
from qtpy.QtWidgets import QAction, QShortcut
# Local imports
from spyder.api.plugins import Plugins, SpyderPluginV2
from spyder.api.plugin_registration.decorators import on_plugin_available
from spyder.api.translations import get_translation
from spyder.plugins.mainmenu.api import ApplicationMenus, HelpMenuSections
from spyder.plugins.shortcuts.confpage import ShortcutsConfigPage
from spyder.plugins.shortcuts.widgets.summary import ShortcutsSummaryDialog
from spyder.utils.qthelpers import add_shortcut_to_tooltip, SpyderAction
# Localization
_ = get_translation('spyder')
class ShortcutActions:
ShortcutSummaryAction = "show_shortcut_summary_action"
# --- Plugin
# ----------------------------------------------------------------------------
class Shortcuts(SpyderPluginV2):
"""
Shortcuts Plugin.
"""
NAME = 'shortcuts'
# TODO: Fix requires to reflect the desired order in the preferences
REQUIRES = [Plugins.Preferences]
OPTIONAL = [Plugins.MainMenu]
CONF_WIDGET_CLASS = ShortcutsConfigPage
CONF_SECTION = NAME
CONF_FILE = False
# --- Signals
# ------------------------------------------------------------------------
sig_shortcuts_updated = Signal()
"""
This signal is emitted to inform shortcuts have been updated.
"""
# --- SpyderPluginV2 API
# ------------------------------------------------------------------------
def get_name(self):
return _("Keyboard shortcuts")
def get_description(self):
return _("Manage application, widget and actions shortcuts.")
def get_icon(self):
return self.create_icon('keyboard')
def on_initialize(self):
self._shortcut_data = []
self.create_action(
ShortcutActions.ShortcutSummaryAction,
text=_("Shortcuts Summary"),
triggered=lambda: self.show_summary(),
register_shortcut=True,
context=Qt.ApplicationShortcut,
)
@on_plugin_available(plugin=Plugins.Preferences)
def on_preferences_available(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.register_plugin_preferences(self)
@on_plugin_available(plugin=Plugins.MainMenu)
def on_main_menu_available(self):
mainmenu = self.get_plugin(Plugins.MainMenu)
shortcuts_action = self.get_action(
ShortcutActions.ShortcutSummaryAction)
# Add to Help menu.
help_menu = mainmenu.get_application_menu(ApplicationMenus.Help)
mainmenu.add_item_to_application_menu(
shortcuts_action,
help_menu,
section=HelpMenuSections.Documentation,
)
def on_mainwindow_visible(self):
self.apply_shortcuts()
# --- Public API
# ------------------------------------------------------------------------
def get_shortcut_data(self):
"""
Return the registered shortcut data from the main application window.
"""
return self._shortcut_data
def reset_shortcuts(self):
"""Reset shrotcuts."""
if self._conf:
self._conf.reset_shortcuts()
def show_summary(self):
"""Reset shortcuts."""
dlg = ShortcutsSummaryDialog(None)
dlg.exec_()
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self._shortcut_data.append((qaction_or_qshortcut, context,
name, add_shortcut_to_tip, plugin_name))
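    # Typical call from another plugin (sketch; the action, context and name
    # are illustrative):
    #
    #     shortcuts = self.get_plugin(Plugins.Shortcuts)
    #     shortcuts.register_shortcut(my_action, context="my_plugin",
    #                                 name="run my thing")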
def unregister_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
"""
Unregister QAction or QShortcut from Spyder main application.
"""
data = (qaction_or_qshortcut, context, name, add_shortcut_to_tip,
plugin_name)
if data in self._shortcut_data:
self._shortcut_data.remove(data)
def apply_shortcuts(self):
"""
Apply shortcuts settings to all widgets/plugins.
"""
toberemoved = []
# TODO: Check shortcut existence based on action existence, so that we
# can update shortcut names without showing the old ones on the
# preferences
for index, (qobject, context, name, add_shortcut_to_tip,
plugin_name) in enumerate(self._shortcut_data):
try:
shortcut_sequence = self.get_shortcut(context, name,
plugin_name)
except (configparser.NoSectionError, configparser.NoOptionError):
# If shortcut does not exist, save it to CONF. This is an
# action for which there is no shortcut assigned (yet) in
# the configuration
self.set_shortcut(context, name, '', plugin_name)
shortcut_sequence = ''
if shortcut_sequence:
keyseq = QKeySequence(shortcut_sequence)
else:
# Needed to remove old sequences that were cleared.
# See spyder-ide/spyder#12992
keyseq = QKeySequence()
# Do not register shortcuts for the toggle view action.
# The shortcut will be displayed only on the menus and handled by
# about to show/hide signals.
if (name.startswith('switch to')
and isinstance(qobject, SpyderAction)):
keyseq = QKeySequence()
try:
if isinstance(qobject, QAction):
if (sys.platform == 'darwin'
and qobject._shown_shortcut == 'missing'):
qobject._shown_shortcut = keyseq
else:
qobject.setShortcut(keyseq)
if add_shortcut_to_tip:
add_shortcut_to_tooltip(qobject, context, name)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self._shortcut_data.pop(index)
self.sig_shortcuts_updated.emit()
def get_shortcut(self, context, name, plugin_name=None):
"""
Get keyboard shortcut (key sequence string).
Parameters
----------
context:
Context must be either '_' for global or the name of a plugin.
name: str
Name of the shortcut.
plugin_id: spyder.api.plugins.SpyderpluginV2 or None
The plugin for which the shortcut is registered. Default is None.
Returns
-------
Shortcut
A shortcut object.
"""
return self._conf.get_shortcut(context, name, plugin_name=plugin_name)
def set_shortcut(self, context, name, keystr, plugin_id=None):
"""
Set keyboard shortcut (key sequence string).
Parameters
----------
context:
Context must be either '_' for global or the name of a plugin.
name: str
Name of the shortcut.
keystr: str
Shortcut keys in string form.
plugin_id: spyder.api.plugins.SpyderpluginV2 or None
The plugin for which the shortcut is registered. Default is None.
"""
self._conf.set_shortcut(context, name, keystr, plugin_name=plugin_id)
| 35.783898 | 80 | 0.572528 |
c6465bea580545c79f7696f1742c8ba4c9abb1b7 | 8,640 | py | Python | daal4py/sklearn/linear_model/_linear.py | owerbat/scikit-learn-intelex | 986637668853a00f0047b7a8854ddb9fb3620549 | [
"Apache-2.0"
] | null | null | null | daal4py/sklearn/linear_model/_linear.py | owerbat/scikit-learn-intelex | 986637668853a00f0047b7a8854ddb9fb3620549 | [
"Apache-2.0"
] | null | null | null | daal4py/sklearn/linear_model/_linear.py | owerbat/scikit-learn-intelex | 986637668853a00f0047b7a8854ddb9fb3620549 | [
"Apache-2.0"
] | null | null | null | #===============================================================================
# Copyright 2014-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
import numpy as np
from scipy import sparse as sp
from scipy import linalg
from sklearn.linear_model._base import _rescale_data
from ..utils.validation import _daal_check_array, _daal_check_X_y
from ..utils.base import _daal_validate_data
from .._utils import sklearn_check_version
from .._device_offload import support_usm_ndarray
from sklearn.utils.fixes import sparse_lsqr
from sklearn.utils.validation import _check_sample_weight
from sklearn.utils import check_array
from sklearn.linear_model import LinearRegression as LinearRegression_original
if sklearn_check_version('1.0'):
from sklearn.linear_model._base import _deprecate_normalize
try:
from sklearn.utils._joblib import Parallel, delayed
except ImportError:
from sklearn.externals.joblib import Parallel, delayed
import daal4py
from .._utils import (
make2d,
getFPType,
get_patch_message,
is_DataFrame,
get_dtype)
import logging


def _daal4py_fit(self, X, y_):
y = make2d(y_)
X_fptype = getFPType(X)
try:
lr_algorithm = daal4py.linear_regression_training(
fptype=X_fptype,
interceptFlag=bool(self.fit_intercept),
method='defaultDense'
)
lr_res = lr_algorithm.compute(X, y)
except RuntimeError:
# Normal system is not invertible, try QR
try:
lr_algorithm = daal4py.linear_regression_training(
fptype=X_fptype,
interceptFlag=bool(self.fit_intercept),
method='qrDense'
)
lr_res = lr_algorithm.compute(X, y)
except RuntimeError:
# fall back on sklearn
return None
lr_model = lr_res.model
self.daal_model_ = lr_model
coefs = lr_model.Beta
self.intercept_ = coefs[:, 0].copy(order='C')
self.coef_ = coefs[:, 1:].copy(order='C')
self.n_features_in_ = X.shape[1]
self.rank_ = X.shape[1]
self.singular_ = np.full((X.shape[1],), np.nan)
if self.coef_.shape[0] == 1 and y_.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self.intercept_ = self.intercept_[0]
return self
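
# --- Illustrative sketch (editorial addition, not part of the original file).
# For a dense X and 1-D y with fit_intercept=True, the DAAL training call
# above solves the same least-squares problem as this plain numpy reference
# with an explicit intercept column:
#
#   Xi = np.hstack([np.ones((X.shape[0], 1)), X])   # prepend intercept
#   beta, *_ = np.linalg.lstsq(Xi, y, rcond=None)
#   intercept, coefs = beta[0], beta[1:]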


def _daal4py_predict(self, X):
X = make2d(X)
_fptype = getFPType(self.coef_)
lr_pred = daal4py.linear_regression_prediction(
fptype=_fptype,
method='defaultDense'
)
if sklearn_check_version('0.23'):
if X.shape[1] != self.n_features_in_:
raise ValueError(
f'X has {X.shape[1]} features, '
f'but LinearRegression is expecting '
f'{self.n_features_in_} features as input')
try:
lr_res = lr_pred.compute(X, self.daal_model_)
except RuntimeError:
raise ValueError(
f'Input data shape {X.shape} is inconsistent with the trained model'
)
res = lr_res.prediction
if res.shape[1] == 1 and self.coef_.ndim == 1:
res = np.ravel(res)
return res


def _fit_linear(self, X, y, sample_weight=None):
    """
    Fit linear model.

    Parameters
    ----------
    X : numpy array or sparse matrix of shape [n_samples, n_features]
        Training data

    y : numpy array of shape [n_samples, n_targets]
        Target values

    sample_weight : numpy array of shape [n_samples]
        Individual weights for each sample

        .. versionadded:: 0.17
           parameter *sample_weight* support to LinearRegression.

    Returns
    -------
    self : returns an instance of self.
    """
params = {
'X': X,
'y': y,
'accept_sparse': ['csr', 'csc', 'coo'],
'y_numeric': True,
'multi_output': True,
}
if sklearn_check_version('0.23'):
X, y = _daal_validate_data(
self,
dtype=[np.float64, np.float32],
**params,
)
else:
X, y = _daal_check_X_y(**params)
dtype = get_dtype(X)
self.fit_shape_good_for_daal_ = \
bool(X.shape[0] > X.shape[1] + int(self.fit_intercept))
daal_ready = self.fit_shape_good_for_daal_ and not sp.issparse(X) and \
sample_weight is None
if sklearn_check_version('0.22') and not sklearn_check_version('0.23'):
daal_ready = daal_ready and dtype in [np.float32, np.float64]
    if daal_ready:
        logging.info(
            "sklearn.linear_model.LinearRegression."
            "fit: " + get_patch_message("daal"))
        res = _daal4py_fit(self, X, y)
        if res is not None:
            return res
        logging.info(
            "sklearn.linear_model.LinearRegression."
            "fit: " + get_patch_message("sklearn_after_daal"))
    else:
        logging.info(
            "sklearn.linear_model.LinearRegression."
            "fit: " + get_patch_message("sklearn"))
return super(LinearRegression, self).fit(
X,
y,
sample_weight=sample_weight,
)
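
# --- Editorial note (not part of the original file): _fit_linear offloads to
# DAAL only when the system is overdetermined (n_samples > n_features +
# intercept), X is dense, and no sample weights are given; every other case
# falls back to stock scikit-learn.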


def _predict_linear(self, X):
    """Predict using the linear model.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = (n_samples, n_features)
        Samples.

    Returns
    -------
    C : array, shape = (n_samples,)
        Returns predicted values.
    """
is_df = is_DataFrame(X)
if sklearn_check_version('0.23'):
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
X = np.asarray(X) if not sp.issparse(X) and not is_df else X
    good_shape_for_daal = X.ndim <= 1 or X.shape[0] > X.shape[1]
sklearn_ready = sp.issparse(X) or not hasattr(self, 'daal_model_') or \
not self.fit_shape_good_for_daal_ or not good_shape_for_daal or \
(hasattr(self, 'sample_weight') and self.sample_weight is not None)
    if sklearn_ready:
        logging.info(
            "sklearn.linear_model.LinearRegression."
            "predict: " + get_patch_message("sklearn"))
        return self._decision_function(X)
    logging.info(
        "sklearn.linear_model.LinearRegression."
        "predict: " + get_patch_message("daal"))
X = _daal_check_array(X)
return _daal4py_predict(self, X)


class LinearRegression(LinearRegression_original):
__doc__ = LinearRegression_original.__doc__
if sklearn_check_version('0.24'):
def __init__(
self,
fit_intercept=True,
normalize='deprecated' if sklearn_check_version('1.0') else False,
copy_X=True,
n_jobs=None,
positive=False,
):
super(LinearRegression, self).__init__(
fit_intercept=fit_intercept,
normalize=normalize,
copy_X=copy_X,
n_jobs=n_jobs,
positive=positive,
)
else:
def __init__(
self,
fit_intercept=True,
normalize=False,
copy_X=True,
n_jobs=None,
):
super(LinearRegression, self).__init__(
fit_intercept=fit_intercept,
normalize=normalize,
copy_X=copy_X,
n_jobs=n_jobs
)

    @support_usm_ndarray()
def fit(self, X, y, sample_weight=None):
if sklearn_check_version('1.0'):
self._normalize = _deprecate_normalize(
self.normalize,
default=False,
estimator_name=self.__class__.__name__,
)
if sklearn_check_version('0.24'):
if self.positive is True:
                logging.info(
                    "sklearn.linear_model.LinearRegression."
                    "fit: " + get_patch_message("sklearn"))
return super(LinearRegression, self).fit(
X, y=y, sample_weight=sample_weight
)
return _fit_linear(self, X, y, sample_weight=sample_weight)

    @support_usm_ndarray()
def predict(self, X):
return _predict_linear(self, X)
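
# --- Hedged usage sketch (editorial addition, not part of the original file).
# Assuming the package re-exports this class from daal4py.sklearn.linear_model,
# the patched estimator keeps the stock scikit-learn API:
#
#   import numpy as np
#   from daal4py.sklearn.linear_model import LinearRegression
#
#   X = np.random.rand(200, 5)
#   y = X @ np.arange(1.0, 6.0) + 0.5
#   model = LinearRegression().fit(X, y)   # routed to daal4py when eligible
#   preds = model.predict(X)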
| 30.967742 | 80 | 0.606944 |