hexsha
stringlengths 40
40
| size
int64 4
1.02M
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
209
| max_stars_repo_name
stringlengths 5
121
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
209
| max_issues_repo_name
stringlengths 5
121
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
209
| max_forks_repo_name
stringlengths 5
121
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
1.02M
| avg_line_length
float64 1.07
66.1k
| max_line_length
int64 4
266k
| alphanum_fraction
float64 0.01
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0dfa05fb71babc8359addb393c00a0d8e644d775
| 1,934
|
py
|
Python
|
Protheus_WebApp/Modules/SIGAWMS/WMSA225TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 17
|
2018-09-24T17:27:08.000Z
|
2021-09-16T19:09:46.000Z
|
Protheus_WebApp/Modules/SIGAWMS/WMSA225TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 4
|
2018-09-24T17:30:32.000Z
|
2022-01-03T11:39:30.000Z
|
Protheus_WebApp/Modules/SIGAWMS/WMSA225TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 18
|
2019-06-07T17:41:34.000Z
|
2022-01-31T18:17:31.000Z
|
from tir import Webapp
import unittest
import time
class WMSA225(unittest.TestCase):
    """TIR web-app test case for the Protheus WMS routine WMSA225."""

    @classmethod
    def setUpClass(inst):
        # Open the web app, log into company T1 / branch "M SP 01" and
        # navigate to the WMSA225 routine.
        inst.oHelper = Webapp()
        inst.oHelper.Setup("SIGAWMS", "10/12/2020", "T1", "M SP 01", "42")
        inst.oHelper.Program("WMSA225")

    def test_WMSA225_CT001(self):
        # Fill the inclusion filter dialog and confirm one transfer.
        self.oHelper.SetButton('Incluir')
        self.oHelper.SetBranch('M SP 01')
        # (question label, value) pairs for the filter dialog, in the
        # exact order the routine presents them.
        filter_answers = [
            ('Armazém De ?', ''),
            ('Armazém Até ?', 'ZZ'),
            ('Produto De ?', ''),
            ('Produto Até ?', 'ZZZZZZZZZZZZZZZ'),
            ('Endereço De ?', ''),
            ('Endereço Até ?', 'ZZZZZZZZZZZZZZZ'),
            ('Unitizador De ?', ''),
            ('Unitizador Até ?', 'ZZZZZZ'),
            ('Tipo Transf. ?', 'Produto'),
        ]
        for question, answer in filter_answers:
            self.oHelper.SetValue(question, answer)
        self.oHelper.SetButton('Ok')
        # Pick the product row and fill quantity / destination address.
        self.oHelper.ClickBox('Produto', 'WMS00000G010')
        self.oHelper.SetValue('Quantidade', '1,00', grid=True, grid_number=2)
        self.oHelper.SetValue('Ender Des', 'A010001', grid=True, grid_number=2)
        self.oHelper.LoadGrid()
        self.oHelper.SetButton("Confirmar")
        self.oHelper.SetButton('Fechar')
        self.oHelper.SetButton('X')
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        # Close the web app session.
        inst.oHelper.TearDown()
if __name__ == "__main__":
unittest.main()
| 33.929825
| 79
| 0.618925
|
04f49a0fa56e9e3c427a54c5a5c46e451fd007e1
| 576
|
py
|
Python
|
scripts/colors.py
|
perlun/jetbrains-one-dark-theme
|
0daeaf372bec421640b8d6fdb4f26123df419ab0
|
[
"MIT"
] | 1
|
2019-11-04T09:41:22.000Z
|
2019-11-04T09:41:22.000Z
|
scripts/colors.py
|
Noman-ahk97/jetbrains-one-dark-theme
|
5b422df1de40e808fcff42df97252970b740e709
|
[
"MIT"
] | null | null | null |
scripts/colors.py
|
Noman-ahk97/jetbrains-one-dark-theme
|
5b422df1de40e808fcff42df97252970b740e709
|
[
"MIT"
] | null | null | null |
import os
import yaml
# Directory holding the YAML color map and the theme template.
CONFIG_DIR = os.path.join(os.path.dirname(__file__), 'config')

if __name__ == '__main__':
    # Load the symbolic-name -> literal-value color mapping.
    with open(os.path.join(CONFIG_DIR, 'colors.yaml'), 'r') as colors_file:
        color_map = yaml.load(colors_file, Loader=yaml.FullLoader)

    theme_path = os.path.join(CONFIG_DIR, 'theme.yaml')
    with open(theme_path, 'r') as fh:
        theme_text = fh.read()

    # Replace every literal color value in the theme with its symbolic
    # name, then write the result back in place.
    for color_name, literal_value in color_map.items():
        theme_text = theme_text.replace(literal_value, color_name)

    with open(theme_path, 'w') as fh:
        fh.write(theme_text)
| 27.428571
| 74
| 0.680556
|
ddf722296b045e4651d40786b03f7d0884ace687
| 6,340
|
py
|
Python
|
odps/df/expr/tests/test_datetimes.py
|
Emersonxuelinux/aliyun-odps-python-sdk
|
0b38c777711c95ed1775fa67822febf88fc3d642
|
[
"Apache-2.0"
] | 412
|
2015-11-01T09:27:52.000Z
|
2022-03-26T05:04:03.000Z
|
odps/df/expr/tests/test_datetimes.py
|
Emersonxuelinux/aliyun-odps-python-sdk
|
0b38c777711c95ed1775fa67822febf88fc3d642
|
[
"Apache-2.0"
] | 168
|
2015-11-16T09:46:39.000Z
|
2022-03-17T06:35:26.000Z
|
odps/df/expr/tests/test_datetimes.py
|
Emersonxuelinux/aliyun-odps-python-sdk
|
0b38c777711c95ed1775fa67822febf88fc3d642
|
[
"Apache-2.0"
] | 103
|
2015-12-01T08:10:09.000Z
|
2022-02-21T12:46:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from odps.tests.core import TestBase
from odps.compat import unittest
from odps.models import Schema
from odps.df.types import validate_data_type
from odps.df.expr.tests.core import MockTable
from odps.df.expr.datetimes import *
class Test(TestBase):
    """Type-level tests for the datetime accessors of DataFrame expressions."""

    def setup(self):
        # Build a mock collection with one column of each primitive type;
        # only 'birth' (datetime) should expose the datetime accessors.
        type_names = ('string', 'int64', 'float64', 'boolean', 'decimal', 'datetime')
        schema = Schema.from_lists(
            ['name', 'id', 'fid', 'isMale', 'scale', 'birth'],
            [validate_data_type(t) for t in type_names])
        table = MockTable(name='pyodps_test_expr_table', schema=schema)
        self.expr = CollectionExpr(_source_data=table, _schema=schema)

    def testDatetimes(self):
        # Non-datetime columns must reject datetime accessors.
        for bad_access in (lambda: self.expr.id.hour,
                           lambda: self.expr.fid.minute,
                           lambda: self.expr.isMale.week,
                           lambda: self.expr.scale.year,
                           lambda: self.expr.name.strftime('%Y')):
            self.assertRaises(AttributeError, bad_access)

        # (accessor, node class, sequence expr class, scalar class):
        # each accessor on a sequence yields the node/sequence types, and on
        # a scalar (birth.max()) yields the scalar type.
        accessor_cases = [
            ('date', Date, DatetimeSequenceExpr, DatetimeScalar),
            ('time', Time, DatetimeSequenceExpr, DatetimeScalar),
            ('year', Year, Int64SequenceExpr, Int64Scalar),
            ('month', Month, Int64SequenceExpr, Int64Scalar),
            ('day', Day, Int64SequenceExpr, Int64Scalar),
            ('hour', Hour, Int64SequenceExpr, Int64Scalar),
            ('minute', Minute, Int64SequenceExpr, Int64Scalar),
            ('second', Second, Int64SequenceExpr, Int64Scalar),
            ('microsecond', MicroSecond, Int64SequenceExpr, Int64Scalar),
            ('week', Week, Int64SequenceExpr, Int64Scalar),
            ('weekofyear', WeekOfYear, Int64SequenceExpr, Int64Scalar),
            ('dayofweek', WeekDay, Int64SequenceExpr, Int64Scalar),
            ('weekday', WeekDay, Int64SequenceExpr, Int64Scalar),
            ('dayofyear', DayOfYear, Int64SequenceExpr, Int64Scalar),
            ('is_month_start', IsMonthStart, BooleanSequenceExpr, BooleanScalar),
            ('is_month_end', IsMonthEnd, BooleanSequenceExpr, BooleanScalar),
            ('is_year_start', IsYearStart, BooleanSequenceExpr, BooleanScalar),
            ('is_year_end', IsYearEnd, BooleanSequenceExpr, BooleanScalar),
        ]
        for attr, node_cls, seq_cls, scalar_cls in accessor_cases:
            self.assertIsInstance(getattr(self.expr.birth, attr), node_cls)
            self.assertIsInstance(getattr(self.expr.birth, attr), seq_cls)
            self.assertIsInstance(getattr(self.expr.birth.max(), attr), scalar_cls)

        # strftime is a call rather than an attribute, so check it separately.
        self.assertIsInstance(self.expr.birth.strftime('%Y'), Strftime)
        self.assertIsInstance(self.expr.birth.strftime('%Y'), StringSequenceExpr)
        self.assertIsInstance(self.expr.birth.max().strftime('%Y'), StringScalar)

        # Datetime arithmetic: +/- offsets stay datetime; datetime - datetime
        # is an int64 delta; datetime + datetime is invalid.
        self.assertIsInstance(self.expr.birth + hour(10), DatetimeSequenceExpr)
        self.assertIsInstance(self.expr.birth - microsecond(100), DatetimeSequenceExpr)
        self.assertIsInstance(self.expr.birth - datetime.now(), Int64SequenceExpr)
        self.assertRaises(ExpressionError, lambda: self.expr.birth + datetime.now())
if __name__ == '__main__':
unittest.main()
| 47.669173
| 109
| 0.730915
|
2276bce9ffcd8566b4b0502e557c58343a7d098f
| 1,070
|
py
|
Python
|
predicators/src/args.py
|
williamshen-nz/predicators
|
6a6b3444108a6d2da3ec3c7d85bbe60ae3b113b9
|
[
"MIT"
] | null | null | null |
predicators/src/args.py
|
williamshen-nz/predicators
|
6a6b3444108a6d2da3ec3c7d85bbe60ae3b113b9
|
[
"MIT"
] | null | null | null |
predicators/src/args.py
|
williamshen-nz/predicators
|
6a6b3444108a6d2da3ec3c7d85bbe60ae3b113b9
|
[
"MIT"
] | null | null | null |
"""Contains settings that vary per run.
All global, immutable settings should be in settings.py.
"""
import argparse
def create_arg_parser(env_required: bool = True,
                      approach_required: bool = True,
                      seed_required: bool = True) -> argparse.ArgumentParser:
    """Defines command line argument parser.

    The three keyword flags control whether --env, --approach and --seed
    must be supplied on the command line.
    """
    parser = argparse.ArgumentParser()
    # Core experiment selectors.
    parser.add_argument("--env", type=str, required=env_required)
    parser.add_argument("--approach", type=str, required=approach_required)
    parser.add_argument("--excluded_predicates", type=str, default="")
    parser.add_argument("--seed", type=int, required=seed_required)
    parser.add_argument("--timeout", type=float, default=10)
    # Boolean toggles, all off by default.
    for flag in ("--make_videos", "--make_failure_videos",
                 "--load_approach", "--load_data"):
        parser.add_argument(flag, action="store_true")
    parser.add_argument("--experiment_id", type=str, default="")
    return parser
| 42.8
| 77
| 0.703738
|
f73092a62dc94f15d8005c38a7f97315b3879895
| 2,582
|
py
|
Python
|
src/openstack_cli/commands/conf/keys/export.py
|
hapylestat/openstack_cli
|
be627f0b3c7ab9bf1032c36faca2ad101e53fb0e
|
[
"Apache-2.0"
] | null | null | null |
src/openstack_cli/commands/conf/keys/export.py
|
hapylestat/openstack_cli
|
be627f0b3c7ab9bf1032c36faca2ad101e53fb0e
|
[
"Apache-2.0"
] | 1
|
2021-03-02T07:15:43.000Z
|
2021-03-02T07:15:43.000Z
|
src/openstack_cli/commands/conf/keys/export.py
|
hapylestat/openstack_cli
|
be627f0b3c7ab9bf1032c36faca2ad101e53fb0e
|
[
"Apache-2.0"
] | 1
|
2021-03-23T10:00:56.000Z
|
2021-03-23T10:00:56.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from openstack_cli.commands.conf.keys.list import _keys_list
from openstack_cli.modules.apputils.terminal import Console
from openstack_cli.core.config import Configuration
from openstack_cli.modules.apputils.discovery import CommandMetaInfo
from openstack_cli.modules.openstack import VMKeypairItemValue, OpenStack
__module__ = CommandMetaInfo("export", "Export ssh keys to disk")
__args__ = __module__.arg_builder\
.add_default_argument("name", str, "Name of the key to be exported", default="")
def _keys_export(conf: Configuration, ostack: OpenStack, name: str):
    """Export the public/private parts of a stored ssh keypair to files
    in the current working directory.

    :param conf: application configuration holding the stored keys
    :param ostack: OpenStack connection (used only to list keys interactively)
    :param name: key name; when empty the user is asked to pick one
    """
    if not name:
        _keys = _keys_list(conf, ostack, True)
        item = Console.ask("Select key to export", _type=int)
        # BUGFIX: reject any out-of-range index, including negatives -- the
        # original only checked the upper bound, so a negative answer would
        # silently export a key indexed from the end of the list.
        if item is None or not 0 <= item < len(_keys):
            Console.print_warning("Invalid selection, aborting")
            return
        name = _keys[item].name

    _key: VMKeypairItemValue
    try:
        _key = conf.get_key(name)
    except KeyError as e:
        Console.print_error(str(e))
        return

    d = os.getcwd()
    _public_file_path = os.path.join(d, f"{_key.name}.public.key")
    _private_file_path = os.path.join(d, f"{_key.name}.private.key")

    # Write whichever halves of the keypair are present; a failure on one
    # file is reported but does not abort the other.
    if _key.public_key:
        try:
            with open(_public_file_path, "w+", encoding="UTF-8") as f:
                f.write(_key.public_key)
            Console.print(f"Public key: {_public_file_path}")
        except IOError as e:
            Console.print_error(f"{_key.name}(public): {str(e)}")

    if _key.private_key:
        try:
            with open(_private_file_path, "w+", encoding="UTF-8") as f:
                f.write(_key.private_key)
            Console.print(f"Private key: {_private_file_path}")
        except IOError as e:
            Console.print_error(f"{_key.name}(private): {str(e)}")
def __init__(conf: Configuration, name: str):
    """Command entry point: export key *name* (prompt the user when empty)."""
    _keys_export(conf, OpenStack(conf), name)
| 36.366197
| 82
| 0.72773
|
0c9785c2d570ef00ef949f268c2c51f80508c758
| 1,118
|
py
|
Python
|
features/other.py
|
jacobjaeger/amadeus
|
5499528236845c2d7a22bcc88d10f02ddbdcfa51
|
[
"MIT"
] | null | null | null |
features/other.py
|
jacobjaeger/amadeus
|
5499528236845c2d7a22bcc88d10f02ddbdcfa51
|
[
"MIT"
] | null | null | null |
features/other.py
|
jacobjaeger/amadeus
|
5499528236845c2d7a22bcc88d10f02ddbdcfa51
|
[
"MIT"
] | null | null | null |
from discord.ext.commands import Cog, command, Context, Converter, BadArgument
from discord import Embed, Member
import discord
from aiohttp import ClientSession
from .common import invalid_arg
from random import choice, randint
from typing import Optional, Union
from re import compile
class Color(Converter):
    """Argument converter that parses a hex color like ``#aabbcc`` or
    ``aabbcc`` into a tuple of its three two-character hex components."""

    # Optional '#' followed by exactly three hex byte pairs.
    REGEX = compile(r"#?([A-Fa-f0-9]{2}){3}")

    async def convert(self, ctx: Context, argument):
        # BUGFIX: use fullmatch() -- match() only anchors at the start, so
        # inputs with trailing garbage such as "#aabbccdd" were accepted and
        # then sliced into a malformed third component.
        if not self.REGEX.fullmatch(argument):
            raise BadArgument(f"{argument} is not a valid hex color")
        argument = argument if not argument.startswith("#") else argument[1:]
        return tuple((argument[:2], argument[2:4], argument[4:]))
class Other(Cog):
    """Miscellaneous commands."""

    @command("color", help="show info about a color")
    async def color(self, ctx: Context, color: Color):  # noqa
        # ``color`` arrives as a tuple of three 2-char hex strings,
        # e.g. ("aa", "bb", "cc"); show it as hex and as rgb().
        hex_code = "".join(color)
        red, green, blue = (int(part, base=16) for part in color)
        em = Embed(
            title="#" + hex_code,
            description=f"rgb({red}, {green}, {blue})",
            color=int(hex_code, base=16)
        )  # noqa
        await ctx.send(embed=em)
def setup(bot):
    """Extension entry point: register the Other cog on the bot."""
    cog = Other()
    bot.add_cog(cog)
| 32.882353
| 109
| 0.644902
|
a3460454e5b162ba28f47ea8a372e786114cdb8a
| 422
|
py
|
Python
|
forum/migrations/0003_auto_20190928_2259.py
|
duplxey/NForum
|
990215e5a841ac054fd3c0a167dee37298a70fb8
|
[
"MIT"
] | 7
|
2019-11-12T14:01:17.000Z
|
2022-01-29T19:17:09.000Z
|
forum/migrations/0003_auto_20190928_2259.py
|
duplxey/NForum
|
990215e5a841ac054fd3c0a167dee37298a70fb8
|
[
"MIT"
] | 4
|
2019-12-08T10:03:21.000Z
|
2020-04-07T20:27:53.000Z
|
forum/migrations/0003_auto_20190928_2259.py
|
duplxey/NForum
|
990215e5a841ac054fd3c0a167dee37298a70fb8
|
[
"MIT"
] | 1
|
2022-01-29T13:44:24.000Z
|
2022-01-29T13:44:24.000Z
|
# Generated by Django 2.2.5 on 2019-09-28 20:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine ``Category.subcategories`` as a plain
    many-to-many field with a ``None`` default."""

    dependencies = [
        ('forum', '0002_auto_20190928_2258'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='subcategories',
            field=models.ManyToManyField(default=None, to='forum.Subcategory'),
        ),
    ]
| 22.210526
| 79
| 0.618483
|
09566356a21669017ea7782a85c4f30055cb40a7
| 1,125
|
py
|
Python
|
tests/unit/workflow/processors/test_target_postchecker.py
|
thoughteer/edera
|
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
|
[
"MIT"
] | 3
|
2018-11-27T15:45:19.000Z
|
2018-12-21T20:32:10.000Z
|
tests/unit/workflow/processors/test_target_postchecker.py
|
thoughteer/edera
|
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
|
[
"MIT"
] | 18
|
2018-12-02T18:38:59.000Z
|
2020-02-05T22:09:37.000Z
|
tests/unit/workflow/processors/test_target_postchecker.py
|
thoughteer/edera
|
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
|
[
"MIT"
] | null | null | null |
import pytest
from edera import Condition
from edera import Task
from edera.exceptions import TargetVerificationError
from edera.requisites import shortcut
from edera.workflow import WorkflowBuilder
from edera.workflow.processors import TargetPostChecker
def test_target_postchecker_always_executes_task():
    """A task whose target already holds must stay executable; only the
    requested root is turned phony by the post-checker."""

    class AlwaysTrue(Condition):
        def check(self):
            return True

    class Checked(Task):
        target = AlwaysTrue()

        def execute(self):
            pass

    class Root(Task):
        @shortcut
        def requisite(self):
            return Checked()

    workflow = WorkflowBuilder().build(Root())
    TargetPostChecker().process(workflow)
    assert workflow[Root()].item.phony
    # Executing the checked task must not raise.
    workflow[Checked()].item.execute()
def test_target_postchecker_verifies_target_after_task_execution():
    """Executing a task whose target still fails afterwards must raise
    TargetVerificationError."""

    class NeverTrue(Condition):
        def check(self):
            return False

    class Broken(Task):
        target = NeverTrue()

        def execute(self):
            pass

    workflow = WorkflowBuilder().build(Broken())
    TargetPostChecker().process(workflow)
    with pytest.raises(TargetVerificationError):
        workflow[Broken()].item.execute()
| 20.454545
| 67
| 0.662222
|
3ba33eff0dd0499dcd0011a5603e946a533ab149
| 2,200
|
py
|
Python
|
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/headstock/lib/logger.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | 1
|
2017-03-28T06:41:51.000Z
|
2017-03-28T06:41:51.000Z
|
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/headstock/lib/logger.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | null | null | null |
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/headstock/lib/logger.py
|
mdavid/nuxleus
|
653f1310d8bf08eaa5a7e3326c2349e56a6abdc2
|
[
"BSD-3-Clause"
] | 1
|
2016-12-13T21:08:58.000Z
|
2016-12-13T21:08:58.000Z
|
# -*- coding: utf-8 -*-
from Axon.Component import component
from Axon.Ipc import shutdownMicroprocess, producerFinished
from bridge import Element as E
import logging
from logging import handlers
class Logger(component):
    """Kamaelia component that logs every message received on its inbox to a
    rotating file (``path``) and/or stdout, until shut down via ``control``."""

    Inboxes = {"inbox" : "String to be logged",
               "control" : "Closes the logger",}
    Outboxes = {"outbox" : "UNUSED",
                "signal" : "UNUSED",}

    def __init__(self, path=None, stdout=False, name=None):
        super(Logger, self).__init__()
        self.path = path               # log file path; None disables file logging
        self.with_stdout = stdout      # also mirror messages to stdout
        self.name = name               # suffix for the logger name

    def main(self):
        # BUGFIX: '%' binds tighter than 'or', so the original
        # '"..." % self.name or ""' evaluated the fallback on the already
        # formatted string and produced "kamaelia.logger.None" when no
        # name was given; parenthesize the fallback instead.
        logger = logging.getLogger("kamaelia.logger.%s" % (self.name or ''))
        logger.setLevel(logging.DEBUG)
        logfmt = logging.Formatter("[%(asctime)s] %(message)s")
        if self.path:
            h = handlers.RotatingFileHandler(self.path, maxBytes=1048576, backupCount=5)
            h.setLevel(logging.DEBUG)
            h.setFormatter(logfmt)
            logger.addHandler(h)
        if self.with_stdout:
            import sys
            h = logging.StreamHandler(sys.stdout)
            h.setLevel(logging.DEBUG)
            h.setFormatter(logfmt)
            logger.addHandler(h)
        yield 1
        while 1:
            if self.dataReady("control"):
                mes = self.recv("control")
                if isinstance(mes, (shutdownMicroprocess, producerFinished)):
                    # BUGFIX: logging.Logger has no close() method -- the
                    # original 'logger.close()' raised AttributeError on
                    # shutdown.  Detach and close our handlers instead.
                    for h in list(logger.handlers):
                        logger.removeHandler(h)
                        h.close()
                    self.send(producerFinished(), "signal")
                    break
            if self.dataReady("inbox"):
                msg = token = self.recv("inbox")
                # Tuples are logged as "prefix : payload"; bridge Elements
                # are serialized to compact XML first.
                if isinstance(token, tuple):
                    if isinstance(token[1], E):
                        msg = "%s : %s" % (msg[0], msg[1].xml(omit_declaration=True, indent=False))
                    else:
                        msg = "%s : %s" % (msg[0], msg[1])
                elif isinstance(msg, E):
                    msg = msg.xml(omit_declaration=True, indent=False)
                logger.debug(msg)
            if not self.anyReady():
                self.pause()
            yield 1
| 30.555556
| 99
| 0.522727
|
45bbe38b7b488aacbf20334b1b53e675b7c9d345
| 1,417
|
py
|
Python
|
keno/utility/stop_watch.py
|
matthewdeanmartin/keno
|
04724f5ebf53b5f11d150ec587750a18d0d3fee0
|
[
"MIT"
] | null | null | null |
keno/utility/stop_watch.py
|
matthewdeanmartin/keno
|
04724f5ebf53b5f11d150ec587750a18d0d3fee0
|
[
"MIT"
] | 17
|
2018-07-14T17:48:04.000Z
|
2020-09-16T10:25:59.000Z
|
keno/utility/stop_watch.py
|
matthewdeanmartin/keno
|
04724f5ebf53b5f11d150ec587750a18d0d3fee0
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
How long is this taking
"""
import datetime
class Timer(object):
    """A simple wall-clock timer.

    Call :meth:`start` before :meth:`stop`/:meth:`elapsed`, and
    :meth:`split` before :meth:`unsplit` -- the timestamp attributes are
    only created by those calls.
    """

    def __init__(self) -> None:
        # Annotations only; the attributes are assigned by start()/stop()/split().
        self.split_start: datetime.datetime
        self.start_time: datetime.datetime
        self.stop_time: datetime.datetime

    def start(self) -> datetime.datetime:
        """Starts the timer and returns the start timestamp."""
        self.start_time = datetime.datetime.now()
        return self.start_time

    def stop(self, message: str = "Total: ") -> str:
        """Stops the timer. Returns the time elapsed, prefixed by *message*."""
        self.stop_time = datetime.datetime.now()
        return message + str(self.stop_time - self.start_time)

    def now(self, message: str = "Now: ") -> str:
        """Returns the current time, prefixed by *message*."""
        # BUGFIX: the original appended another ": " after a default message
        # that already ends in ": ", producing e.g. "Now: : 2020-01-01 ...".
        return message + str(datetime.datetime.now())

    def elapsed(self, message: str = "Elapsed: ") -> str:
        """Time elapsed since start was called, prefixed by *message*."""
        # BUGFIX: the original inserted an extra " " after the message,
        # producing "Elapsed:  0:00:...".
        return message + str(datetime.datetime.now() - self.start_time)

    def split(self, message: str = "Split started at: ") -> str:
        """Starts a split timer and returns its start timestamp as text."""
        self.split_start = datetime.datetime.now()
        return message + str(self.split_start)

    def unsplit(self, message: str = "Unsplit: ") -> str:
        """Stops a split. Returns the time elapsed since split was called."""
        return message + str(datetime.datetime.now() - self.split_start)
| 33.738095
| 77
| 0.62103
|
a03d6120ff4b8c32e66b0cde7b7ebd965af76042
| 473
|
py
|
Python
|
Django/scraping/news_scraping.py
|
hanana1253/TIL
|
301ba13acdba677b19be1c03fcd4135ff06d862c
|
[
"MIT"
] | 3
|
2021-06-01T08:31:54.000Z
|
2022-02-02T06:03:18.000Z
|
Django/scraping/news_scraping.py
|
hanana1253/TIL
|
301ba13acdba677b19be1c03fcd4135ff06d862c
|
[
"MIT"
] | null | null | null |
Django/scraping/news_scraping.py
|
hanana1253/TIL
|
301ba13acdba677b19be1c03fcd4135ff06d862c
|
[
"MIT"
] | 2
|
2021-03-27T06:45:12.000Z
|
2022-01-07T09:34:32.000Z
|
import requests
from bs4 import BeautifulSoup
# Scrape the first 10 result pages (10 hits per page) of a Naver news
# search for the URL-encoded query "%EC%BD%94%EB%A1%9C%EB%82%98" (코로나,
# "corona") and print each headline's title and link.
for start in range(1, 100, 10):
    url = ('https://search.naver.com/search.naver?where=news&sm=tab_jum'
           '&query=%EC%BD%94%EB%A1%9C%EB%82%98&start={}').format(start)
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    for anchor in soup.find_all("a", "news_tit"):
        print(anchor['title'], anchor['href'], sep='\n')
| 36.384615
| 131
| 0.691332
|
b749425984be001e996f17e1336c011eb82344d4
| 1,749
|
py
|
Python
|
Predictive_Tracker/BoxKalmanFilter.py
|
Royzon/3d-tracking-approaches
|
58cdf55b5fef5c6ceae67f0a44d589d08b24959c
|
[
"MIT"
] | 1
|
2021-12-17T16:23:56.000Z
|
2021-12-17T16:23:56.000Z
|
Predictive_Tracker/BoxKalmanFilter.py
|
Royzon/3d-tracking-approaches
|
58cdf55b5fef5c6ceae67f0a44d589d08b24959c
|
[
"MIT"
] | null | null | null |
Predictive_Tracker/BoxKalmanFilter.py
|
Royzon/3d-tracking-approaches
|
58cdf55b5fef5c6ceae67f0a44d589d08b24959c
|
[
"MIT"
] | null | null | null |
import numpy as np
from filterpy.kalman import KalmanFilter
# (x, y, z, w, l, h, angle, v_x, v_y, v_z, v_angle)
class BoxKalmanFilter():
    """Constant-velocity Kalman filter over a 3D bounding box.

    State (11): x, y, z, w, l, h, angle, v_x, v_y, v_z, v_angle.
    Measurement (7): x, y, z, w, l, h, angle.
    """

    def __init__(self, box, id, type):
        self.id = id
        self.type = type
        self.age = 0          # total number of predict() steps
        self.blind_time = 0   # consecutive predicts without a measurement

        self.kf = KalmanFilter(dim_x=11, dim_z=7)

        # Transition: identity plus constant-velocity couplings
        # x += v_x, y += v_y, z += v_z, angle += v_angle.
        transition = np.eye(11)
        transition[[0, 1, 2, 6], [7, 8, 9, 10]] = 1
        self.kf.F = transition

        # Measurement observes the first 7 state components directly.
        self.kf.H = np.eye(7, 11)

        # Inflate uncertainty on the unobserved velocity components.
        self.kf.P[7:, 7:] *= 1000.
        self.kf.P *= 10.
        self.kf.Q[7:, 7:] *= 0.01
        self.kf.x[:7] = box.reshape((7, 1))

    def update(self, box):
        """Fuse a new 7-dim box measurement; resets the blind-time counter."""
        self.blind_time = 0
        self.kf.update(box)

    def predict(self):
        """Advance the state estimate one time step."""
        self.blind_time += 1
        self.age += 1
        self.kf.predict()

    def get_state(self):
        """Return the current 7-dim box estimate as a flat array."""
        return self.kf.x[:7].reshape((7,))
| 33.634615
| 57
| 0.348771
|
4f6ee2e5176bc62a36b9c645fe8eab63d82c438e
| 704
|
py
|
Python
|
components/google-cloud/google_cloud_pipeline_components/version.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 1
|
2021-11-22T12:22:14.000Z
|
2021-11-22T12:22:14.000Z
|
components/google-cloud/google_cloud_pipeline_components/version.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 13
|
2021-07-06T06:47:43.000Z
|
2022-03-08T23:35:13.000Z
|
components/google-cloud/google_cloud_pipeline_components/version.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 1
|
2021-08-22T15:51:17.000Z
|
2021-08-22T15:51:17.000Z
|
# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the version string of Google Cloud Pipeline Components."""
__version__ = "0.2.1.dev"
| 41.411765
| 74
| 0.761364
|
73ac107c68e67c1f540c7dd532312b929cb8b662
| 1,341
|
py
|
Python
|
ffield.py
|
Nlioxa/ffield
|
0ffb150b6c0336a12b5f5d7f448054c385dd56b4
|
[
"MIT"
] | 1
|
2020-12-16T11:50:45.000Z
|
2020-12-16T11:50:45.000Z
|
ffield.py
|
Nlioxa/ffield
|
0ffb150b6c0336a12b5f5d7f448054c385dd56b4
|
[
"MIT"
] | null | null | null |
ffield.py
|
Nlioxa/ffield
|
0ffb150b6c0336a12b5f5d7f448054c385dd56b4
|
[
"MIT"
] | null | null | null |
class FNumber:
    """Element of a finite field: ``f`` is the owning FField and ``v`` the
    canonical residue in ``[0, f.p)``."""

    def __init__(self, ffield, value):
        self.f = ffield
        self.v = int(value) % ffield.p

    def __int__(self):
        return self.v

    def __str__(self):
        return str(self.v)

    def __repr__(self):
        return str(self.v)

    def __eq__(self, other):
        return int(self) == int(other)

    def __neg__(self):
        return FNumber(self.f, -self.v)

    def __add__(self, other):
        total = int(self) + int(other)
        return FNumber(self.f, total)

    def __sub__(self, other):
        # a - b is computed as (-b) + a
        return -other + self

    def __mul__(self, other):
        product = int(self) * int(other)
        return FNumber(self.f, product)

    def __truediv__(self, other):
        # division via discrete logarithms w.r.t. the field generator
        exponent = self.f.find(int(self)) - self.f.find(int(other))
        return self.f.mod(exponent)

    def __pow__(self, other):
        exponent = self.f.find(int(self)) * int(other)
        return self.f.mod(exponent)
class FField:
    """Finite field of characteristic ``p`` whose nonzero elements are
    enumerated as powers of the generator ``w``."""

    def __init__(self, p, w):
        self.p = p   # field characteristic (modulus)
        self.w = w   # generator
    def mod(self, j):
        """Return w**j (mod p) wrapped as an FNumber.

        NOTE(review): for negative j the exponent used is (j % p) - 1 --
        preserved exactly as written; confirm this wrap-around is intended.
        """
        exponent = j % self.p - 1 if j < 0 else j
        acc = 1
        for _ in range(exponent):
            acc = acc * self.w % self.p
        return self.num(acc)

    def find(self, b):
        """Discrete logarithm: smallest j with w**j == b, or -1 if none."""
        for j in range(self.p):
            if int(b) == self.mod(j):
                return j
        return -1

    def num(self, value):
        """Wrap *value* as an element of this field."""
        return FNumber(self, value)
| 24.381818
| 76
| 0.517524
|
4483510427ca1667081aa51f724a87f274a66909
| 917
|
py
|
Python
|
src/apps/domains/location/migrations/0007_auto_20190721_0529.py
|
antiline/jun2
|
00928cea1f4b8cd6634cf9a1ae6dc19c95d0e54c
|
[
"MIT"
] | null | null | null |
src/apps/domains/location/migrations/0007_auto_20190721_0529.py
|
antiline/jun2
|
00928cea1f4b8cd6634cf9a1ae6dc19c95d0e54c
|
[
"MIT"
] | 17
|
2019-06-24T14:11:49.000Z
|
2021-06-04T22:19:59.000Z
|
src/apps/domains/location/migrations/0007_auto_20190721_0529.py
|
antiline/jun2
|
00928cea1f4b8cd6634cf9a1ae6dc19c95d0e54c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-07-20 20:29
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated: tweak EXIF-GPS extraction fields and the gpx share
    UUID default, and rename the status table."""

    dependencies = [
        ('location', '0006_auto_20190721_0341'),
    ]

    operations = [
        migrations.AlterField(
            model_name='extractgpsfromexifstatus',
            name='image_path',
            field=models.FilePathField(allow_files=False, allow_folders=True, path='/Users/antiline/Documents/Jun2/mount', recursive=True, verbose_name='Image 경로'),
        ),
        migrations.AlterField(
            model_name='gpxshare',
            name='share_uuid',
            field=models.UUIDField(default=uuid.UUID('84574781-f532-4aaa-991e-5437c65c7247'), unique=True, verbose_name='공유 UUID'),
        ),
        migrations.AlterModelTable(
            name='extractgpsfromexifstatus',
            table='extract_gps_from_exif_status',
        ),
    ]
| 31.62069
| 164
| 0.642312
|
aee9c25fd03d501f2094e2fd83c83bb5971b3b8e
| 11,214
|
py
|
Python
|
tensorflow_ranking/python/feature_test.py
|
we1559/ranking
|
7de1bd2d366d942c0acaabc91876035cc1d9c3f0
|
[
"Apache-2.0"
] | 2
|
2019-07-29T00:56:01.000Z
|
2021-05-18T01:01:33.000Z
|
tensorflow_ranking/python/feature_test.py
|
rishabhmehrotra/ranking
|
8536405845e8ca180dae01c4d0e34ecb1be89b65
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_ranking/python/feature_test.py
|
rishabhmehrotra/ranking
|
8536405845e8ca180dae01c4d0e34ecb1be89b65
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Ranking Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for feature transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow_ranking.python import feature as feature_lib
EXAMPLE_1_PROTO = """features: {
feature: { key: "example_0.age" value: { int64_list: { value: [ 41 ] } } }
feature: { key: "example_0.weight" value: { int64_list: { value: [ 77 ] } } }
feature: { key: "example_0.label" value: { float_list: { value: [ 1.0 ] } } }
feature: { key: "example_1.age" value: { int64_list: { value: [ 43 ] } } }
feature: { key: "example_1.weight" value: { int64_list: { value: [ 78 ] } } }
feature: { key: "example_1.label" value: { float_list: { value: [ 0.0 ] } } }
}"""
EXAMPLE_2_PROTO = """features: {
feature: { key: "example_0.age" value: { int64_list: { value: [ 25 ] } } }
feature: { key: "example_0.weight" value: { int64_list: { value: [ 95 ] } } }
feature: { key: "example_0.label" value: { float_list: { value: [ 1.0 ] } } }
feature: { key: "example_1.age" value: { int64_list: { value: [ 53 ] } } }
feature: { key: "example_1.weight" value: { int64_list: { value: [ 85 ] } } }
feature: { key: "example_1.label" value: { float_list: { value: [ 0.0 ] } } }
}
"""
def _create_input_fn():
def my_input_fn():
feature_to_type = {
"example_0.age": tf.io.FixedLenFeature([1], tf.int64),
"example_1.age": tf.io.FixedLenFeature([1], tf.int64),
"example_0.weight": tf.io.FixedLenFeature([1], tf.int64),
"example_1.weight": tf.io.FixedLenFeature([1], tf.int64),
"example_0.label": tf.io.FixedLenFeature([1], tf.float32),
"example_1.label": tf.io.FixedLenFeature([1], tf.float32)
}
feature_1_proto = tf.train.Example()
feature_2_proto = tf.train.Example()
text_format.Merge(EXAMPLE_1_PROTO, feature_1_proto)
text_format.Merge(EXAMPLE_2_PROTO, feature_2_proto)
features_tensor = tf.io.parse_example(
serialized=[
feature_1_proto.SerializeToString(),
feature_2_proto.SerializeToString()
],
features=feature_to_type)
# Create the dataset.
dataset = tf.data.Dataset.from_tensor_slices(features_tensor).batch(2)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
return my_input_fn
class FeatureLibTest(tf.test.TestCase, parameterized.TestCase):
def test_make_identity_transform_fn(self):
features = {
"context": # Input size: (batch_size=2, num_features=2).
tf.convert_to_tensor(value=[[1.0, 1.0], [1.0, 1.0]]),
"per_example":
tf.convert_to_tensor(value=[[[10.0]], [[10.0]]]),
}
with tf.compat.v1.Session() as sess:
transform_fn = feature_lib.make_identity_transform_fn(["context"])
context_features, per_example_features = sess.run(
transform_fn(features, 1))
self.assertEqual(["context"], sorted(context_features))
self.assertAllEqual([[1.0, 1.0], [1.0, 1.0]], context_features["context"])
self.assertEqual(["per_example"], sorted(per_example_features))
self.assertAllEqual([[[10.0]], [[10.0]]],
per_example_features["per_example"])
def test_encode_features(self):
# Inputs.
vocabulary_size = 4
# -1 values are ignored.
input_a = np.array([
[3, -1, -1], # example 0, ids [3]
[0, 1, -1], # example 1, ids [0, 1]
])
input_b = np.array([
[0, -1, -1], # example 0, ids [0]
[-1, -1, -1], # example 1, ids []
])
input_features = {"aaa": input_a, "bbb": input_b}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.), # id 2
(9., 13.) # id 3
)
# Expected lookup result, using combiner='mean'.
expected_lookups_a = (
# example 0:
(9., 13.), # ids [3], embedding = [9, 13]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
expected_lookups_b = (
# example 0:
(1., 2.), # ids [0], embedding = [1, 2]
# example 1:
(0., 0.), # ids [], embedding = [0, 0]
)
# Build columns.
categorical_column_a = feature_column.categorical_column_with_identity(
key="aaa", num_buckets=vocabulary_size)
categorical_column_b = feature_column.categorical_column_with_identity(
key="bbb", num_buckets=vocabulary_size)
embed_column_a, embed_column_b = feature_column.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=lambda shape, dtype, partition_info: embedding_values,
shared_embedding_collection_name="custom_collection_name")
feature_columns = {"aaa": embed_column_a, "bbb": embed_column_b}
cols_to_tensors = feature_lib.encode_features(
input_features,
feature_columns.values(),
mode=tf.estimator.ModeKeys.EVAL)
embedding_lookup_a = cols_to_tensors[feature_columns["aaa"]]
embedding_lookup_b = cols_to_tensors[feature_columns["bbb"]]
# Assert expected embedding variable and lookups.
global_vars = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
embedding_var = global_vars[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.compat.v1.tables_initializer())
self.assertAllEqual(embedding_values, embedding_var.eval())
self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
self.assertAllEqual(expected_lookups_b, embedding_lookup_b.eval())
def test_encode_listwise_features(self):
# Batch size = 2, list_size = 2.
features = {
"query_length":
tf.convert_to_tensor(value=[[1], [2]]),
"utility":
tf.convert_to_tensor(value=[[[1.0], [0.0]], [[0.0], [1.0]]]),
"unigrams":
tf.SparseTensor(
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]],
values=["ranking", "regression", "classification", "ordinal"],
dense_shape=[2, 2, 1])
}
context_feature_columns = {
"query_length":
feature_column.numeric_column(
"query_length", shape=(1,), default_value=0, dtype=tf.int64)
}
example_feature_columns = {
"utility":
feature_column.numeric_column(
"utility", shape=(1,), default_value=0.0, dtype=tf.float32),
"unigrams":
feature_column.embedding_column(
feature_column.categorical_column_with_vocabulary_list(
"unigrams",
vocabulary_list=[
"ranking", "regression", "classification", "ordinal"
]),
dimension=10)
}
with self.assertRaisesRegexp(
ValueError,
r"2nd dimesion of tensor must be equal to input size: 3, but found .*"):
feature_lib.encode_listwise_features(
features,
input_size=3,
context_feature_columns=context_feature_columns,
example_feature_columns=example_feature_columns)
context_features, example_features = feature_lib.encode_listwise_features(
features,
input_size=2,
context_feature_columns=context_feature_columns,
example_feature_columns=example_feature_columns)
self.assertAllEqual(["query_length"], sorted(context_features))
self.assertAllEqual(["unigrams", "utility"], sorted(example_features))
self.assertAllEqual([2, 2, 10],
example_features["unigrams"].get_shape().as_list())
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.compat.v1.tables_initializer())
context_features, example_features = sess.run(
[context_features, example_features])
self.assertAllEqual([[1], [2]], context_features["query_length"])
self.assertAllEqual([[[1.0], [0.0]], [[0.0], [1.0]]],
example_features["utility"])
def test_encode_pointwise_features(self):
# Batch size = 2, tf.Example input format.
features = {
"query_length":
tf.convert_to_tensor(value=[[1], [1]]), # Repeated context feature.
"utility":
tf.convert_to_tensor(value=[[1.0], [0.0]]),
"unigrams":
tf.SparseTensor(
indices=[[0, 0], [1, 0]],
values=["ranking", "regression"],
dense_shape=[2, 1])
}
context_feature_columns = {
"query_length":
tf.feature_column.numeric_column(
"query_length", shape=(1,), default_value=0, dtype=tf.int64)
}
example_feature_columns = {
"utility":
tf.feature_column.numeric_column(
"utility", shape=(1,), default_value=0.0, dtype=tf.float32),
"unigrams":
tf.feature_column.embedding_column(
feature_column.categorical_column_with_vocabulary_list(
"unigrams",
vocabulary_list=[
"ranking", "regression", "classification", "ordinal"
]),
dimension=10)
}
(context_features,
example_features) = feature_lib.encode_pointwise_features(
features,
context_feature_columns=context_feature_columns,
example_feature_columns=example_feature_columns)
self.assertAllEqual(["query_length"], sorted(context_features))
self.assertAllEqual(["unigrams", "utility"], sorted(example_features))
# Unigrams dense tensor has shape: [batch_size=2, list_size=1, dim=10].
self.assertAllEqual([2, 1, 10],
example_features["unigrams"].get_shape().as_list())
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(tf.compat.v1.tables_initializer())
context_features, example_features = sess.run(
[context_features, example_features])
self.assertAllEqual([[1], [1]], context_features["query_length"])
# Utility tensor has shape: [batch_size=2, list_size=1, 1].
self.assertAllEqual([[[1.0]], [[0.0]]], example_features["utility"])
if __name__ == "__main__":
tf.test.main()
| 39.625442
| 81
| 0.630462
|
41bc4868190dd457679f2c9eb5066100ad0c5a15
| 17,190
|
py
|
Python
|
CoreHandler.py
|
hiramegl/AaMakro5oul
|
fda92a4da1182965eea8bb4c508afdf43e2a8704
|
[
"Apache-2.0"
] | 3
|
2019-05-01T20:33:12.000Z
|
2021-02-15T10:38:35.000Z
|
CoreHandler.py
|
hiramegl/AaMakro5oul
|
fda92a4da1182965eea8bb4c508afdf43e2a8704
|
[
"Apache-2.0"
] | 1
|
2020-10-12T00:14:11.000Z
|
2020-10-13T11:50:36.000Z
|
CoreHandler.py
|
hiramegl/AaMakro5oul
|
fda92a4da1182965eea8bb4c508afdf43e2a8704
|
[
"Apache-2.0"
] | null | null | null |
# ******************************************************************************
# This file is part of the AaMakro5oul project
# (An OSC/MIDI controller for Ableton Live with DJ features)
#
# Full project source: https://github.com/hiramegl/AaMakro5oul
#
# License : Apache License 2.0
# Full license: https://github.com/hiramegl/AaMakro5oul/blob/master/LICENSE
#
# Copyright 2018, 2019 by Hiram Galicia (hiramegl@yahoo.com)
# http://www.unasystems.com
#
# All rights reserved.
# ******************************************************************************
from BaseHandler import BaseHandler
# ******************************************************************************
# Core Handler: gives advanced control of Ableton Live features
# and keeps track of static variables shared by several modules
# ******************************************************************************
class CoreHandler(BaseHandler):
# session state static variables
m_nGuiTrackOffset = 0 # remote GUI track offset
m_nGuiVisTracks = 8 # remote GUI visible tracks in the screen
m_nGuiSceneOffset = 0 # remove GUI scene offset
m_nGuiVisScenes = 8 # remove GUI visible scenes in the screen
m_nGuiVisSends = 8 # max num sends in remote GUI that we manage in session (tools modal) and sequence tabs
m_nGuiVisReturns = 8 # max num returns in remote GUI that we manage in session tab (tools modal)
# beat sequence static variables
m_nGuiVisBeats = 8 # number of visible beats in beat grid
m_nGuiVisNotes = 12 # number of visible notes in beat grid
m_nSeqOffset = -1 # sequence offset for beat grid (in beats, time axis)
m_nNoteOffset = -1 # note offset for beat grid (idx abs, note axis)
m_nSelNoteRelIdx = -1 # selected note index
# track clip static variables (for build-ups and drop-downs)
m_bNextSelectionOn = False
m_aNextClips = []
m_aTrackIndeces = [str(i) for i in xrange(m_nGuiVisTracks)] + ['master', 'selected']
m_aSceneIndeces = [str(i) for i in xrange(m_nGuiVisScenes)] + ['selected']
m_aBeatsIndeces = [str(i) for i in xrange(m_nGuiVisBeats)]
m_hBeats = {
"6400": 64.0,
"3200": 32.0,
"1600": 16.0,
"0800": 8.0,
"0400": 4.0,
"0200": 2.0,
"0100": 1.0,
"0050": 0.5,
"0025": 0.25,
"0012": 0.125,
"0006": 0.0625,
"0003": 0.03125,
"0001": 0.015625
}
# track selection for device control
m_hDevTrackSel = {
'a': ['none', -1],
'b': ['none', -1],
'c': ['none', -1],
'd': ['none', -1]
}
m_nMaxTrackSels = 8
m_nMaxReturnSels = 8
m_aChannels = ['a', 'b', 'c', 'd']
m_aChannelsFx = ['a', 'b', 'c', 'd', 'x']
# track selection for effects channel
m_aFxTrackSel = ['none', -1]
"""
Note-offset explanation diagram:
Start Note C1 ---+ +--- Middle C
| |
V V | Button |
Octave |-2 -1 0 1 2 3 4 5 6 7 8 | row | Off Rel Abs | Off Rel Abs | Off Rel Abs |
Note +------------------------------------------------------+--------+---------------+---------------+----------------+
B | 11 23 35 47 59 71 83 95 107 [119] | 0 | 11 - 0 = 11 | 47 - 0 = 47 | 119 - 0 = 119 |
A# | 10 22 34 46 58 70 82 94 106 118 | 1 | | | --- --- |
A | 9 21 33 45 57 69 81 93 105 117 | 2 | | | Max Max |
G# | 8 20 32 44 56 68 80 92 104 116 | 3 | | | offset val |
G | 7 19 31 43 55 67 79 91 103 115 127 | 4 | | | |
F# | 6 18 30 42 54 66 78 90 102 114 126 | 5 | | | |
F | 5 17 29 41 53 65 77 89 101 113 125 | 6 | | | |
E | 4 16 28 40 52 64 76 88 100 112 124 | 7 | | [Octave 1] | |
D# | 3 15 27 39 51 63 75 87 99 111 123 | 8 | offset val | C1 | |
D | 2 14 26 38 50 62 74 86 98 110 122 | 9 | Min Min | Default | |
C# | 1 13 25 37 49 61 73 85 97 109 121 | 10 | -- --- | -- -- | |
C |[0] 12 24 (36) 48 _60_ 72 84 96 108 120 | 11 | 11 - 11 = 0 | 47 - 11 = 36 | 119 - 11 = 108 |
+------------------------------------------------------+--------+---------------+---------------+----------------+
"""
# constructor
def __init__(self, _oCtrlInstance, _oOscServer, _hConfig):
BaseHandler.__init__(self, _oCtrlInstance, _oOscServer, _hConfig)
# CONSTANTS
self.m_nNumBeatsInBar = 4 # number of beats in a bar (4 for 4/4 notation)
self.m_nNumBitsInBeat = 4 # number of bits in a beat (the divisions within a beat)
self.m_nNumNotesInOct = 12 # number of notes per octave
self.m_nDefNoteOffset = 47 # default note offset for Octave 1 (C1 note)
self.m_nMinNoteOffset = 11 # minimum note offset
self.m_nMaxNoteOffset = 119 # maximum note offset
# beat note accessors ********************************************
def gui_vis_beats(self, _nGuiVisBeats = -1):
if (_nGuiVisBeats == -1):
return CoreHandler.m_nGuiVisBeats
CoreHandler.m_nGuiVisBeats = _nGuiVisBeats
def seq_offset(self, _nSeqOffset = -1):
if (_nSeqOffset == -1):
return CoreHandler.m_nSeqOffset
CoreHandler.m_nSeqOffset = _nSeqOffset
def note_offset(self, _nNoteOffset = -1):
if (_nNoteOffset == -1):
return CoreHandler.m_nNoteOffset
CoreHandler.m_nNoteOffset = _nNoteOffset
# input: value between 0 and 11
# output: value between 0 and 119 (should be 127 in the future)
def note_idx_abs(self, _nNoteIdxRel):
return CoreHandler.m_nNoteOffset - _nNoteIdxRel
# input: value between 0 and 119 (should be 127 in the future)
# output: value between 0 and 11
def note_idx_rel(self, _nNoteIdxAbs):
return CoreHandler.m_nNoteOffset - _nNoteIdxAbs
def note_time_abs(self, _nNoteTimeRel):
return _nNoteTimeRel + CoreHandler.m_nSeqOffset
def note_time_rel(self, _nNoteTimeAbs):
return _nNoteTimeAbs - CoreHandler.m_nSeqOffset
# input: value between 0 and 127
def is_note_visible(self, _nNoteIdxAbs, _nNoteTimeAbs):
nNoteIdxRel = self.note_idx_rel(_nNoteIdxAbs)
nNoteTimeRel = self.note_time_rel(_nNoteTimeAbs)
return (nNoteIdxRel >= 0 and nNoteIdxRel < CoreHandler.m_nGuiVisNotes
and nNoteTimeRel >= 0 and nNoteTimeRel < CoreHandler.m_nGuiVisBeats)
def current_octave(self):
return (CoreHandler.m_nNoteOffset / self.m_nNumNotesInOct) - 2 # MIDI Standard starts in octave -2
# selected note, relative index: from 0 to 11
def sel_note_rel_idx(self, _nSelNodeRelIdx = -1):
if (_nSelNodeRelIdx == -1):
return CoreHandler.m_nSelNoteRelIdx
CoreHandler.m_nSelNoteRelIdx = _nSelNodeRelIdx
# track and returns accessors ************************************
def tracks(self):
return self.song().visible_tracks
def returns(self):
return self.song().return_tracks
def master(self):
return self.song().master_track
def tracks_and_returns(self):
return tuple(self.tracks()) + tuple(self.returns())
def get_track_or_return(self, _nTrackIdxAbs):
aTracksAndReturns = self.tracks_and_returns()
return aTracksAndReturns[_nTrackIdxAbs]
def get_track(self, _nTrackIdxAbs):
aTracks = self.tracks()
return aTracks[_nTrackIdxAbs]
def get_return(self, _nTrackIdxAbs):
aReturns = self.returns()
return aReturns[_nTrackIdxAbs]
def sel_track(self, _oTrack = None):
if (_oTrack != None):
self.song().view.selected_track = _oTrack
return self.song().view.selected_track
def sel_track_idx_abs(self):
aAllTracks = self.tracks_and_returns()
oSelTrack = self.sel_track()
return list(aAllTracks).index(oSelTrack)
def is_track_available(self, _nTrackIdxAbs):
return (_nTrackIdxAbs < len(self.tracks()))
def is_return_available(self, _nReturnIdxAbs):
return (_nReturnIdxAbs < len(self.returns()))
def is_return_track(self, _oTrack):
return (_oTrack in self.returns())
# only audio tracks (0 ... num audio tracks - 1)
def tracks_range(self):
return range(len(self.tracks()))
# only return tracks (0 ... num return tracks - 1)
def returns_range(self):
return range(len(self.returns()))
# both audio tracks and return tracks (0 ... num tracks + num returns - 1)
def tracks_and_returns_range(self):
return range(len(self.tracks_and_returns()))
# scene accessors ************************************************
def scenes(self):
return self.song().scenes
def get_scene(self, _nSceneIdxAbs):
aAllScenes = self.scenes()
return aAllScenes[_nSceneIdxAbs]
def sel_scene(self, _oScene = None):
if (_oScene != None):
self.song().view.selected_scene = _oScene
return self.song().view.selected_scene
def sel_scene_idx_abs(self):
aAllScenes = self.scenes()
oSelScene = self.sel_scene()
return list(aAllScenes).index(oSelScene)
def is_scene_available(self, _nSceneIdxAbs):
return (_nSceneIdxAbs < len(self.scenes()))
def available_scenes(self):
return range(len(self.scenes()))
# clip accessors *************************************************
def sel_clip_slot(self):
return self.song().view.highlighted_clip_slot
def is_clip_available(self, _nTrackIdxAbs, _nSceneIdxAbs):
return self.is_track_available(_nTrackIdxAbs) and self.is_scene_available(_nSceneIdxAbs)
# next clips accessors *********************************
def next_selection_on(self, _bNextSelectionOn = None):
if (_bNextSelectionOn != None):
CoreHandler.m_bNextSelectionOn = _bNextSelectionOn
return CoreHandler.m_bNextSelectionOn
def next_clips_add(self, _nTrackIdxAbs, _nSceneIdxAbs):
CoreHandler.m_aNextClips.append([_nTrackIdxAbs, _nSceneIdxAbs])
def next_clips_clear(self):
CoreHandler.m_aNextClips = []
def next_clips(self):
return CoreHandler.m_aNextClips
# gui track accessors **********************************
def gui_track_offset(self, _nGuiTrackOffset = -1):
if (_nGuiTrackOffset == -1):
return CoreHandler.m_nGuiTrackOffset
CoreHandler.m_nGuiTrackOffset = _nGuiTrackOffset
def gui_num_tracks(self, _nGuiNumTracks = -1):
if (_nGuiNumTracks == -1):
return CoreHandler.m_nGuiVisTracks
CoreHandler.m_nGuiVisTracks = _nGuiNumTracks
def gui_visible_tracks_abs_range(self):
return range(CoreHandler.m_nGuiTrackOffset, CoreHandler.m_nGuiTrackOffset + CoreHandler.m_nGuiVisTracks)
def gui_visible_tracks_rel_range(self, _aExtra = None):
aTracks = list(range(CoreHandler.m_nGuiVisTracks)) # using a list to be able to add _aExtra elements
return aTracks if (_aExtra == None) else aTracks + _aExtra
def track_idx_rel(self, _nTrackIdxAbs):
return (_nTrackIdxAbs - CoreHandler.m_nGuiTrackOffset) % CoreHandler.m_nGuiVisTracks
def track_idx_abs(self, _nTrackIdxRel):
return _nTrackIdxRel + CoreHandler.m_nGuiTrackOffset
def is_track_visible(self, _nTrackIdxAbs):
nTrackIdxRel = _nTrackIdxAbs - CoreHandler.m_nGuiTrackOffset
return (nTrackIdxRel >= 0
and nTrackIdxRel < CoreHandler.m_nGuiVisTracks)
# gui scene accessors **********************************
def gui_scene_offset(self, _nGuiSceneOffset = -1):
if (_nGuiSceneOffset == -1):
return CoreHandler.m_nGuiSceneOffset
CoreHandler.m_nGuiSceneOffset = _nGuiSceneOffset
def gui_num_scenes(self, _nGuiNumScenes = -1):
if (_nGuiNumScenes == -1):
return CoreHandler.m_nGuiVisScenes
CoreHandler.m_nGuiVisScenes = _nGuiNumScenes
def gui_visible_scenes_abs_range(self):
return range(CoreHandler.m_nGuiSceneOffset, CoreHandler.m_nGuiSceneOffset + CoreHandler.m_nGuiVisScenes)
def gui_visible_scenes_rel_range(self, _aExtra = None):
aScenes = list(range(CoreHandler.m_nGuiVisScenes)) # using a list to be able to add _aExtra elements
return aScenes if (_aExtra == None) else aScenes + _aExtra
def scene_idx_rel(self, _nSceneIdxAbs):
return (_nSceneIdxAbs - CoreHandler.m_nGuiSceneOffset) % CoreHandler.m_nGuiVisScenes
def scene_idx_abs(self, _nSceneIdxRel):
return _nSceneIdxRel + CoreHandler.m_nGuiSceneOffset
def is_scene_visible(self, _nSceneIdxAbs):
nSceneIdxRel = _nSceneIdxAbs - CoreHandler.m_nGuiSceneOffset
return (nSceneIdxRel >= 0
and nSceneIdxRel < CoreHandler.m_nGuiVisScenes)
# clip, sends and returns accessors ********************
def is_clip_visible(self, _nTrackIdxAbs, _nSceneIdxAbs):
nTrackIdxRel = _nTrackIdxAbs - CoreHandler.m_nGuiTrackOffset
nSceneIdxRel = _nSceneIdxAbs - CoreHandler.m_nGuiSceneOffset
return (nTrackIdxRel >= 0
and nTrackIdxRel < CoreHandler.m_nGuiVisTracks
and nSceneIdxRel >= 0
and nSceneIdxRel < CoreHandler.m_nGuiVisScenes)
def gui_num_vis_sends(self):
return CoreHandler.m_nGuiVisSends
def gui_vis_sends_range(self):
return range(CoreHandler.m_nGuiVisSends)
def gui_num_vis_returns(self):
return CoreHandler.m_nGuiVisReturns
def gui_vis_returns_range(self):
return range(CoreHandler.m_nGuiVisReturns)
# ******************************************************
def track_indeces_list(self):
return list(CoreHandler.m_aTrackIndeces)
def scene_indeces_list(self):
return list(CoreHandler.m_aSceneIndeces)
def beats_indeces_list(self):
return list(CoreHandler.m_aBeatsIndeces)
def beats_list(self):
return list(CoreHandler.m_hBeats.keys())
def beat_value(self, _sBeat):
return CoreHandler.m_hBeats.get(_sBeat, 0.0)
# ******************************************************
# selected track to handle device control events
def sel_track_dev(self, _sChannel, _sTrackType = None, _nIdxAbs = -1):
if (_nIdxAbs == -1):
# check if is in remover mode
if (_sTrackType == 'none'):
CoreHandler.m_hDevTrackSel[_sChannel] = ['none', -1]
# is in getter mode
elif (_sTrackType == None):
return CoreHandler.m_hDevTrackSel[_sChannel]
# is in setter mode
else:
CoreHandler.m_hDevTrackSel[_sChannel] = [_sTrackType, _nIdxAbs]
def max_track_sels(self):
return CoreHandler.m_nMaxTrackSels
def max_return_sels(self):
return CoreHandler.m_nMaxReturnSels
# ******************************************************
def sel_track_fx(self, _sTrackType = None, _nIdxAbs = -1):
if (_nIdxAbs == -1):
# check if is in remover mode
if (_sTrackType == 'none'):
CoreHandler.m_aFxTrackSel = ['none', -1]
# is in getter mode
elif (_sTrackType == None):
return CoreHandler.m_aFxTrackSel
# is in setter mode
else:
CoreHandler.m_aFxTrackSel = [_sTrackType, _nIdxAbs]
# ******************************************************
def value_gui_to_param(self, _nGuiValue, _oParam):
if (_oParam.is_quantized == True):
# just copy the value for quantized params
nParamValue = _nGuiValue
else:
# adjust the value in case is continuous (not discrete)
nParamMin = _oParam.min
nParamMax = _oParam.max
nParamValue = (_nGuiValue * (nParamMax - nParamMin)) + nParamMin
return nParamValue # value between min and max
def value_param_to_gui(self, _nParamValue, _oParam):
if (_oParam.is_quantized == True):
# just copy the value for quantized params
nGuiValue = _nParamValue
else:
# adjust the value in case is continuous (not discrete)
nParamMin = _oParam.min
nParamMax = _oParam.max
nGuiValue = (_nParamValue - nParamMin) / (nParamMax - nParamMin)
return nGuiValue # value between 0 and 1
| 33.905325
| 125
| 0.576847
|
86d365a64e9c422a8dba7681b586fd4f8b5b6277
| 6,926
|
py
|
Python
|
cloudera-framework-assembly/src/main/assembly/root/lib/manager/python/environment.py
|
ggear/cloudera-framework
|
a1240d85aeacfb2c467d01032d04bdb72b9e55a7
|
[
"Apache-2.0"
] | 11
|
2015-07-01T02:34:37.000Z
|
2019-05-02T06:06:11.000Z
|
cloudera-framework-assembly/src/main/assembly/root/lib/manager/python/environment.py
|
ggear/cloudera-framework
|
a1240d85aeacfb2c467d01032d04bdb72b9e55a7
|
[
"Apache-2.0"
] | 3
|
2015-12-16T22:29:21.000Z
|
2018-01-02T06:54:06.000Z
|
cloudera-framework-assembly/src/main/assembly/root/lib/manager/python/environment.py
|
ggear/cloudera-framework
|
a1240d85aeacfb2c467d01032d04bdb72b9e55a7
|
[
"Apache-2.0"
] | 7
|
2016-06-19T10:30:38.000Z
|
2019-03-28T16:01:37.000Z
|
#!/usr/bin/env python
"""
Provide a flat list of cluster connectivity parameters
Usage: %s [options]
Options:
-h --help Show help
--host=<cm-server-host> Specify a Cloudera Manager Server host
Defaults to 'localhost'
--port=<cm-server-port> Override the default Cloudera Manager Server port
Defaults to '7180'
--user=<cm-server-user> The Cloudera Manager user
Defaults to 'admin'
--user=<cm-server-user-password> The Cloudera Manager user password
Defaults to 'admin'
--cluster_name=<cluster-name> Name the cluster to reflect on
Defaults to the zeroth cluster defined
--service_role_name=<service-role-type> Filter to apply to service and role name
Defaults to all roles defined in the cluster
--random_index=<true|false> For parameters that are multi-value, define singular value
via a random index rather than the zeroth element
Defaults to 'false'
"""
import getopt
import inspect
import logging
import sys
import textwrap
from random import randint
from cm_api import api_client
from cm_api.api_client import ApiResource
LOG = logging.getLogger(__name__)
MAN_API_VERSION = 16 # Do not use api_client.API_CURRENT_VERSION, it is often +1 current production version
def do_print_header():
print '###############################################################################'
print '# '
print '# Cluster Environment'
print '#'
print '###############################################################################'
print ''
def do_print_footer():
print '###############################################################################'
def do_print_line_item_manual(service_name, role_name, keys, values):
service_role_name = service_name.upper() + '_' + role_name.upper();
print '# ' + service_role_name
for key, value in zip(keys, values):
print service_role_name + '_' + key + '=' + value
print ''
return True
def do_print_line_item(api, service, service_role_name_filter, random_index, service_name, role_name, role_port_name, hosts, ports):
service_role_name = service_name.upper() + '_' + role_name.upper();
if service_role_name_filter is None or service_role_name_filter.upper() == service_role_name and service.type == service_name:
if len(hosts) == 0:
for role in service.get_roles_by_type(role_name):
host = api.get_host(role.hostRef.hostId).hostname
port = role.get_config('full')[role_port_name].value
if port is None:
port = role.get_config('full')[role_port_name].default
hosts.append(host)
ports.append(port)
if len(hosts) > 0:
index = 0
if str(random_index).title() == 'True':
index = randint(0, len(hosts) - 1)
print '# ' + service_role_name
print service_role_name + '_HOST=' + hosts[index]
print service_role_name + '_PORT=' + ports[index]
print service_role_name + '_HOSTS=' + ",".join(hosts)
print service_role_name + '_PORTS=' + ",".join(ports)
print service_role_name + '_HOSTS_AND_PORTS=' + ','.join([host + ':' + port for host, port in zip(hosts, ports)])
print ''
return True
return False
def do_call(host, port, user, password, cluster_name, service_role_name, random_index):
api = ApiResource(host, port, user, password, False, MAN_API_VERSION);
for cluster in api.get_all_clusters():
if cluster_name is None:
break
elif cluster_name == cluster.name:
break
if cluster_name is not None and cluster_name != cluster.name:
print >> sys.stderr, "Cloud not find cluster: " + cluster_name
return -2;
do_print_header()
for service in cluster.get_all_services():
do_print_line_item(api, service, service_role_name, random_index, 'HDFS', 'NAMENODE', 'namenode_port', [], [])
do_print_line_item(api, service, service_role_name, random_index, 'KUDU', 'KUDU_MASTER', 'webserver_port', [], [])
do_print_line_item(api, service, service_role_name, random_index, 'HUE', 'HUE_SERVER', 'hue_http_port', [], [])
do_print_line_item(api, service, service_role_name, random_index, 'HIVE', 'HIVESERVER2', 'hs2_thrift_address_port', [], [])
do_print_line_item(api, service, service_role_name, random_index, 'IMPALA', 'IMPALAD', 'beeswax_port', [], [])
do_print_line_item(api, service, service_role_name, random_index, 'FLUME', 'AGENT', 'agent_http_port', [], [])
do_print_line_item(api, service, service_role_name, random_index, 'KAFKA', 'KAFKA_BROKER', 'port', [], [])
do_print_line_item(api, service, service_role_name, random_index, 'ZOOKEEPER', 'SERVER', 'clientPort', [], [])
do_print_footer()
def usage():
doc = inspect.getmodule(usage).__doc__
print >> sys.stderr, textwrap.dedent(doc % (sys.argv[0],))
def setup_logging(level):
logging.basicConfig()
logging.getLogger().setLevel(level)
def main(argv):
setup_logging(logging.INFO)
host = 'localhost'
port = 7180
user = 'admin'
password = 'admin'
cluster_name = None
service_role_name = None
random_index = False
try:
opts, args = getopt.getopt(sys.argv[1:], "h",
["help", "host=", "port=", "user=", "password=", "random_index=", "cluster_name=", "service_role_name="])
except getopt.GetoptError, err:
print >> sys.stderr, err
usage()
return -1
for option, value in opts:
if option in ("-h", "--help"):
usage()
return -1
elif option in ("--host"):
host = value;
elif option in ("--port"):
port = value;
elif option in ("--user"):
user = value;
elif option in ("--password"):
password = value;
elif option in ("--random_index"):
random_index = value;
elif option in ("--cluster_name"):
cluster_name = value;
elif option in ("--service_role_name"):
service_role_name = value;
else:
print >> sys.stderr, "Unknown option or flag: " + option
usage()
return -1
do_call(host, port, user, password, cluster_name, service_role_name, random_index)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 42.490798
| 140
| 0.574069
|
1b6cdeb92d2787f82075304727b31637982bbfeb
| 1,967
|
py
|
Python
|
snakemake_workflow_template/release_modules/replace_identifiers/lib/arguments.py
|
regulondbunam/RegulonDB-MCO-Felipe
|
6e5546907399d5712a8e69f2f8320af8d30b9d7b
|
[
"MIT"
] | null | null | null |
snakemake_workflow_template/release_modules/replace_identifiers/lib/arguments.py
|
regulondbunam/RegulonDB-MCO-Felipe
|
6e5546907399d5712a8e69f2f8320af8d30b9d7b
|
[
"MIT"
] | null | null | null |
snakemake_workflow_template/release_modules/replace_identifiers/lib/arguments.py
|
regulondbunam/RegulonDB-MCO-Felipe
|
6e5546907399d5712a8e69f2f8320af8d30b9d7b
|
[
"MIT"
] | null | null | null |
import argparse
def load_arguments():
parser = argparse.ArgumentParser(description="Replaces the data's source identifiers with RegulonDB's identifiers.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
epilog="Either Multigenomic or Datamarts should be selected through arguments options"
)
parser.add_argument(
"-org", "--organism",
help="organism name corresponding to the input data",
metavar="ECOLI",
required=True
)
parser.add_argument(
"-i", "--inputdir",
help="Input JSON data path whose source identifiers will be replaced with RegulonDB's identifiers",
metavar="/Users/user/Proyectos/RegulonDB/Results/source/ecocyc",
required=True
)
parser.add_argument(
"-o", "--outputdir",
help="Directory where the JSON files with the RegulonDB IDs replaced will be stored",
metavar="/Users/user/Proyectos/RegulonDB/Results/replaced_identifiers/multigenomic/ecocyc",
required=True
)
parser.add_argument(
"-u", "--url",
help="URL to establish a connection between the process and MongoDB",
metavar="mongodb://127.0.0.1:27017/regulondbidentifiers",
required=True
)
parser.add_argument(
"-l", "--log",
help="Path where the log is going to be saved.",
metavar="/Users/user/Proyectos/RegulonDB/Results/log",
)
parser.add_argument(
"-db", "--database",
choices=["regulondbmultigenomic", "regulondbht"],
help="database where the input data belongs",
metavar="regulondbmultigenomic",
required=True
)
parser.add_argument(
"-v", "--version",
help="RegulonDB's release version",
metavar="10.8",
required=True
)
arguments = parser.parse_args()
return arguments
| 32.245902
| 123
| 0.613625
|
2bc5b8e5380fac52bfaa8045af40040ac06e1b5d
| 1,982
|
py
|
Python
|
test/test_inline_response2007_message.py
|
RiskAmerica/api-client-python
|
468c554a0440bef5086828631e25d99d41e28571
|
[
"MIT"
] | null | null | null |
test/test_inline_response2007_message.py
|
RiskAmerica/api-client-python
|
468c554a0440bef5086828631e25d99d41e28571
|
[
"MIT"
] | null | null | null |
test/test_inline_response2007_message.py
|
RiskAmerica/api-client-python
|
468c554a0440bef5086828631e25d99d41e28571
|
[
"MIT"
] | 1
|
2021-04-14T15:52:03.000Z
|
2021-04-14T15:52:03.000Z
|
# coding: utf-8
"""
APIs RISKAMERICA
A continuación les presentamos la documentación las **APIs** **de** **RiskAmerica**, el cual es un servicio pagado ofrecido por RiskAmerica que se contrata por separado a nuestras otras ofertas de software. Algunas consideraciones que debe tener al momento de usar las APIs: - El APIKEY o Token lo puede conseguir solicitándolo al equipo comercial de RiskAmerica - El request necesita ser enviado con el header **Accept:** **application/json** para que responda en formato **JSON** (de no ser enviado con esto se responderá en formato **XML**) - Todos los Servicios son **REST** y sus parametros pueden ser enviados tanto en **POST** como **GET** - El uso de las APIs puede llevar un cobro asociado según se pacte en el acuerdo comercial, por lo que le recomendamos ser cuidadosos en el uso de éstas para evitar sobre-cargos innecesarios. - RiskAmerica funciona con un mecanismo de **WhiteList** **de** **IPs** para las consultas de las API. Para habilitar o modificar la lista de IPs permitidas debe contactarse al mail **contacto@riskamerica.com**. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import riam_api_client
from riam_api_client.models.inline_response2007_message import InlineResponse2007Message # noqa: E501
from riam_api_client.rest import ApiException
class TestInlineResponse2007Message(unittest.TestCase):
"""InlineResponse2007Message unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse2007Message(self):
"""Test InlineResponse2007Message"""
# FIXME: construct object with mandatory attributes with example values
# model = riam_api_client.models.inline_response2007_message.InlineResponse2007Message() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 49.55
| 1,070
| 0.750757
|
d6c0273a2adf56645a606f545fd038f7db980b6c
| 2,479
|
py
|
Python
|
parlai/core/logs.py
|
albert-magyar/ParlAI
|
3133b4e8290f8a42679b93a65fc25e76ac7f6761
|
[
"MIT"
] | null | null | null |
parlai/core/logs.py
|
albert-magyar/ParlAI
|
3133b4e8290f8a42679b93a65fc25e76ac7f6761
|
[
"MIT"
] | null | null | null |
parlai/core/logs.py
|
albert-magyar/ParlAI
|
3133b4e8290f8a42679b93a65fc25e76ac7f6761
|
[
"MIT"
] | 1
|
2020-11-04T06:13:59.000Z
|
2020-11-04T06:13:59.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Log metrics to tensorboard.
This file provides interface to log any metrics in tensorboard, could be
extended to any other tool like visdom.
.. code-block: none
tensorboard --logdir <PARLAI_DATA/tensorboard> --port 8888.
"""
import json
import numbers
from parlai.core.opt import Opt
from parlai.core.metrics import Metric
from parlai.utils.io import PathManager
import parlai.utils.logging as logging
class TensorboardLogger(object):
"""
Log objects to tensorboard.
"""
@staticmethod
def add_cmdline_args(argparser):
"""
Add tensorboard CLI args.
"""
logger = argparser.add_argument_group('Tensorboard Arguments')
logger.add_argument(
'-tblog',
'--tensorboard-log',
type='bool',
default=False,
help="Tensorboard logging of metrics, default is %(default)s",
hidden=False,
)
def __init__(self, opt: Opt):
try:
# tensorboard is a very expensive thing to import. Wait until the
# last second to import it.
from tensorboardX import SummaryWriter
except ImportError:
raise ImportError('Please run `pip install tensorboard tensorboardX`.')
tbpath = opt['model_file'] + '.tensorboard'
logging.debug(f'Saving tensorboard logs to: {tbpath}')
if not PathManager.exists(tbpath):
PathManager.mkdirs(tbpath)
self.writer = SummaryWriter(tbpath, comment=json.dumps(opt))
def log_metrics(self, setting, step, report):
"""
Add all metrics from tensorboard_metrics opt key.
:param setting:
One of train/valid/test. Will be used as the title for the graph.
:param step:
Number of parleys
:param report:
The report to log
"""
for k, v in report.items():
if isinstance(v, numbers.Number):
self.writer.add_scalar(f'{k}/{setting}', v, global_step=step)
elif isinstance(v, Metric):
self.writer.add_scalar(f'{k}/{setting}', v.value(), global_step=step)
else:
logging.error(f'k {k} v {v} is not a number')
def flush(self):
self.writer.flush()
| 30.9875
| 85
| 0.621218
|
c9c53273a4434902758419f5bac37babc0d032fc
| 37,252
|
py
|
Python
|
official/recommendation/data_pipeline.py
|
denis-choi/models
|
b478430d89f0b40fe3caf281f4e24cefda825412
|
[
"Apache-2.0"
] | 3
|
2019-10-12T07:28:40.000Z
|
2019-10-16T00:58:10.000Z
|
official/recommendation/data_pipeline.py
|
denis-choi/models
|
b478430d89f0b40fe3caf281f4e24cefda825412
|
[
"Apache-2.0"
] | 6
|
2021-06-08T21:30:48.000Z
|
2022-03-12T00:29:00.000Z
|
official/recommendation/data_pipeline.py
|
denis-choi/models
|
b478430d89f0b40fe3caf281f4e24cefda825412
|
[
"Apache-2.0"
] | 7
|
2017-07-01T22:47:51.000Z
|
2021-05-15T10:48:22.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Asynchronous data producer for the NCF pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import functools
import os
import sys
import tempfile
import threading
import time
import timeit
import traceback
import typing
import numpy as np
import six
from six.moves import queue
import tensorflow as tf
from absl import logging
from official.recommendation import constants as rconst
from official.recommendation import movielens
from official.recommendation import popen_helper
from official.recommendation import stat_utils
SUMMARY_TEMPLATE = """General:
{spacer}Num users: {num_users}
{spacer}Num items: {num_items}
Training:
{spacer}Positive count: {train_pos_ct}
{spacer}Batch size: {train_batch_size} {multiplier}
{spacer}Batch count per epoch: {train_batch_ct}
Eval:
{spacer}Positive count: {eval_pos_ct}
{spacer}Batch size: {eval_batch_size} {multiplier}
{spacer}Batch count per epoch: {eval_batch_ct}"""
class DatasetManager(object):
"""Helper class for handling TensorFlow specific data tasks.
This class takes the (relatively) framework agnostic work done by the data
constructor classes and handles the TensorFlow specific portions (TFRecord
management, tf.Dataset creation, etc.).
"""
def __init__(self,
is_training,
stream_files,
batches_per_epoch,
shard_root=None,
deterministic=False,
num_train_epochs=None):
# type: (bool, bool, int, typing.Optional[str], bool, int) -> None
"""Constructs a `DatasetManager` instance.
Args:
is_training: Boolean of whether the data provided is training or
evaluation data. This determines whether to reuse the data
(if is_training=False) and the exact structure to use when storing and
yielding data.
stream_files: Boolean indicating whether data should be serialized and
written to file shards.
batches_per_epoch: The number of batches in a single epoch.
shard_root: The base directory to be used when stream_files=True.
deterministic: Forgo non-deterministic speedups. (i.e. sloppy=True)
num_train_epochs: Number of epochs to generate. If None, then each
call to `get_dataset()` increments the number of epochs requested.
"""
self._is_training = is_training
self._deterministic = deterministic
self._stream_files = stream_files
self._writers = []
self._write_locks = [threading.RLock() for _ in
range(rconst.NUM_FILE_SHARDS)] if stream_files else []
self._batches_per_epoch = batches_per_epoch
self._epochs_completed = 0
self._epochs_requested = num_train_epochs if num_train_epochs else 0
self._shard_root = shard_root
self._result_queue = queue.Queue()
self._result_reuse = []
@property
def current_data_root(self):
subdir = (rconst.TRAIN_FOLDER_TEMPLATE.format(self._epochs_completed)
if self._is_training else rconst.EVAL_FOLDER)
return os.path.join(self._shard_root, subdir)
def buffer_reached(self):
# Only applicable for training.
return (self._epochs_completed - self._epochs_requested >=
rconst.CYCLES_TO_BUFFER and self._is_training)
@staticmethod
def serialize(data):
"""Convert NumPy arrays into a TFRecords entry."""
def create_int_feature(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
feature_dict = {
k: create_int_feature(v.astype(np.int64)) for k, v in data.items()
}
return tf.train.Example(
features=tf.train.Features(feature=feature_dict)).SerializeToString()
@staticmethod
def deserialize(serialized_data, batch_size=None, is_training=True):
"""Convert serialized TFRecords into tensors.
Args:
serialized_data: A tensor containing serialized records.
batch_size: The data arrives pre-batched, so batch size is needed to
deserialize the data.
is_training: Boolean, whether data to deserialize to training data
or evaluation data.
"""
def _get_feature_map(batch_size, is_training=True):
"""Returns data format of the serialized tf record file."""
if is_training:
return {
movielens.USER_COLUMN:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64),
movielens.ITEM_COLUMN:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64),
rconst.VALID_POINT_MASK:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64),
"labels":
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64)
}
else:
return {
movielens.USER_COLUMN:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64),
movielens.ITEM_COLUMN:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64),
rconst.DUPLICATE_MASK:
tf.io.FixedLenFeature([batch_size, 1], dtype=tf.int64)
}
features = tf.io.parse_single_example(
serialized_data, _get_feature_map(batch_size, is_training=is_training))
users = tf.cast(features[movielens.USER_COLUMN], rconst.USER_DTYPE)
items = tf.cast(features[movielens.ITEM_COLUMN], rconst.ITEM_DTYPE)
if is_training:
valid_point_mask = tf.cast(features[rconst.VALID_POINT_MASK], tf.bool)
fake_dup_mask = tf.zeros_like(users)
return {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
rconst.VALID_POINT_MASK: valid_point_mask,
rconst.TRAIN_LABEL_KEY:
tf.reshape(tf.cast(features["labels"], tf.bool),
(batch_size, 1)),
rconst.DUPLICATE_MASK: fake_dup_mask
}
else:
labels = tf.cast(tf.zeros_like(users), tf.bool)
fake_valid_pt_mask = tf.cast(tf.zeros_like(users), tf.bool)
return {
movielens.USER_COLUMN:
users,
movielens.ITEM_COLUMN:
items,
rconst.DUPLICATE_MASK:
tf.cast(features[rconst.DUPLICATE_MASK], tf.bool),
rconst.VALID_POINT_MASK:
fake_valid_pt_mask,
rconst.TRAIN_LABEL_KEY:
labels
}
def put(self, index, data):
# type: (int, dict) -> None
"""Store data for later consumption.
Because there are several paths for storing and yielding data (queues,
lists, files) the data producer simply provides the data in a standard
format at which point the dataset manager handles storing it in the correct
form.
Args:
index: Used to select shards when writing to files.
data: A dict of the data to be stored. This method mutates data, and
therefore expects to be the only consumer.
"""
if self._is_training:
mask_start_index = data.pop(rconst.MASK_START_INDEX)
batch_size = data[movielens.ITEM_COLUMN].shape[0]
data[rconst.VALID_POINT_MASK] = np.expand_dims(
np.less(np.arange(batch_size), mask_start_index), -1)
if self._stream_files:
example_bytes = self.serialize(data)
with self._write_locks[index % rconst.NUM_FILE_SHARDS]:
self._writers[index % rconst.NUM_FILE_SHARDS].write(example_bytes)
else:
self._result_queue.put((
data, data.pop("labels")) if self._is_training else data)
def start_construction(self):
if self._stream_files:
tf.io.gfile.makedirs(self.current_data_root)
template = os.path.join(self.current_data_root, rconst.SHARD_TEMPLATE)
self._writers = [tf.io.TFRecordWriter(template.format(i))
for i in range(rconst.NUM_FILE_SHARDS)]
def end_construction(self):
if self._stream_files:
[writer.close() for writer in self._writers]
self._writers = []
self._result_queue.put(self.current_data_root)
self._epochs_completed += 1
def data_generator(self, epochs_between_evals):
"""Yields examples during local training."""
assert not self._stream_files
assert self._is_training or epochs_between_evals == 1
if self._is_training:
for _ in range(self._batches_per_epoch * epochs_between_evals):
yield self._result_queue.get(timeout=300)
else:
if self._result_reuse:
assert len(self._result_reuse) == self._batches_per_epoch
for i in self._result_reuse:
yield i
else:
# First epoch.
for _ in range(self._batches_per_epoch * epochs_between_evals):
result = self._result_queue.get(timeout=300)
self._result_reuse.append(result)
yield result
def increment_request_epoch(self):
self._epochs_requested += 1
def get_dataset(self, batch_size, epochs_between_evals):
"""Construct the dataset to be used for training and eval.
For local training, data is provided through Dataset.from_generator. For
remote training (TPUs) the data is first serialized to files and then sent
to the TPU through a StreamingFilesDataset.
Args:
batch_size: The per-replica batch size of the dataset.
epochs_between_evals: How many epochs worth of data to yield.
(Generator mode only.)
"""
self.increment_request_epoch()
if self._stream_files:
if epochs_between_evals > 1:
raise ValueError("epochs_between_evals > 1 not supported for file "
"based dataset.")
epoch_data_dir = self._result_queue.get(timeout=300)
if not self._is_training:
self._result_queue.put(epoch_data_dir) # Eval data is reused.
file_pattern = os.path.join(
epoch_data_dir, rconst.SHARD_TEMPLATE.format("*"))
# TODO(seemuch): remove this contrib import
# pylint: disable=line-too-long
from tensorflow.contrib.tpu.python.tpu.datasets import StreamingFilesDataset
# pylint: enable=line-too-long
dataset = StreamingFilesDataset(
files=file_pattern, worker_job=popen_helper.worker_job(),
num_parallel_reads=rconst.NUM_FILE_SHARDS, num_epochs=1,
sloppy=not self._deterministic)
map_fn = functools.partial(
self.deserialize,
batch_size=batch_size,
is_training=self._is_training)
dataset = dataset.map(map_fn, num_parallel_calls=16)
else:
types = {movielens.USER_COLUMN: rconst.USER_DTYPE,
movielens.ITEM_COLUMN: rconst.ITEM_DTYPE}
shapes = {
movielens.USER_COLUMN: tf.TensorShape([batch_size, 1]),
movielens.ITEM_COLUMN: tf.TensorShape([batch_size, 1])
}
if self._is_training:
types[rconst.VALID_POINT_MASK] = np.bool
shapes[rconst.VALID_POINT_MASK] = tf.TensorShape([batch_size, 1])
types = (types, np.bool)
shapes = (shapes, tf.TensorShape([batch_size, 1]))
else:
types[rconst.DUPLICATE_MASK] = np.bool
shapes[rconst.DUPLICATE_MASK] = tf.TensorShape([batch_size, 1])
data_generator = functools.partial(
self.data_generator, epochs_between_evals=epochs_between_evals)
dataset = tf.data.Dataset.from_generator(
generator=data_generator, output_types=types,
output_shapes=shapes)
return dataset.prefetch(16)
def make_input_fn(self, batch_size):
"""Create an input_fn which checks for batch size consistency."""
def input_fn(params):
"""Returns batches for training."""
# Estimator passes batch_size during training and eval_batch_size during
# eval. TPUEstimator only passes batch_size.
param_batch_size = (params["batch_size"] if self._is_training else
params.get("eval_batch_size") or params["batch_size"])
if batch_size != param_batch_size:
raise ValueError("producer batch size ({}) differs from params batch "
"size ({})".format(batch_size, param_batch_size))
epochs_between_evals = (params.get("epochs_between_evals", 1)
if self._is_training else 1)
return self.get_dataset(batch_size=batch_size,
epochs_between_evals=epochs_between_evals)
return input_fn
class BaseDataConstructor(threading.Thread):
"""Data constructor base class.
This class manages the control flow for constructing data. It is not meant
to be used directly, but instead subclasses should implement the following
two methods:
self.construct_lookup_variables
self.lookup_negative_items
"""
def __init__(
self,
maximum_number_epochs, # type: int
num_users, # type: int
num_items, # type: int
user_map, # type: dict
item_map, # type: dict
train_pos_users, # type: np.ndarray
train_pos_items, # type: np.ndarray
train_batch_size, # type: int
batches_per_train_step, # type: int
num_train_negatives, # type: int
eval_pos_users, # type: np.ndarray
eval_pos_items, # type: np.ndarray
eval_batch_size, # type: int
batches_per_eval_step, # type: int
stream_files, # type: bool
deterministic=False, # type: bool
epoch_dir=None, # type: str
num_train_epochs=None, # type: int
create_data_offline=False # type: bool
):
# General constants
self._maximum_number_epochs = maximum_number_epochs
self._num_users = num_users
self._num_items = num_items
self.user_map = user_map
self.item_map = item_map
self._train_pos_users = train_pos_users
self._train_pos_items = train_pos_items
self.train_batch_size = train_batch_size
self._num_train_negatives = num_train_negatives
self._batches_per_train_step = batches_per_train_step
self._eval_pos_users = eval_pos_users
self._eval_pos_items = eval_pos_items
self.eval_batch_size = eval_batch_size
self.num_train_epochs = num_train_epochs
self.create_data_offline = create_data_offline
# Training
if self._train_pos_users.shape != self._train_pos_items.shape:
raise ValueError(
"User positives ({}) is different from item positives ({})".format(
self._train_pos_users.shape, self._train_pos_items.shape))
(self._train_pos_count,) = self._train_pos_users.shape
self._elements_in_epoch = (1 + num_train_negatives) * self._train_pos_count
self.train_batches_per_epoch = self._count_batches(
self._elements_in_epoch, train_batch_size, batches_per_train_step)
# Evaluation
if eval_batch_size % (1 + rconst.NUM_EVAL_NEGATIVES):
raise ValueError("Eval batch size {} is not divisible by {}".format(
eval_batch_size, 1 + rconst.NUM_EVAL_NEGATIVES))
self._eval_users_per_batch = int(
eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))
self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)
self.eval_batches_per_epoch = self._count_batches(
self._eval_elements_in_epoch, eval_batch_size, batches_per_eval_step)
# Intermediate artifacts
self._current_epoch_order = np.empty(shape=(0,))
self._shuffle_iterator = None
self._shuffle_with_forkpool = not stream_files
if stream_files:
self._shard_root = epoch_dir or tempfile.mkdtemp(prefix="ncf_")
atexit.register(tf.io.gfile.rmtree, dirname=self._shard_root)
else:
self._shard_root = None
self._train_dataset = DatasetManager(True, stream_files,
self.train_batches_per_epoch,
self._shard_root, deterministic,
num_train_epochs)
self._eval_dataset = DatasetManager(False, stream_files,
self.eval_batches_per_epoch,
self._shard_root, deterministic,
num_train_epochs)
# Threading details
super(BaseDataConstructor, self).__init__()
self.daemon = True
self._stop_loop = False
self._fatal_exception = None
self.deterministic = deterministic
def __str__(self):
multiplier = ("(x{} devices)".format(self._batches_per_train_step)
if self._batches_per_train_step > 1 else "")
summary = SUMMARY_TEMPLATE.format(
spacer=" ", num_users=self._num_users, num_items=self._num_items,
train_pos_ct=self._train_pos_count,
train_batch_size=self.train_batch_size,
train_batch_ct=self.train_batches_per_epoch,
eval_pos_ct=self._num_users, eval_batch_size=self.eval_batch_size,
eval_batch_ct=self.eval_batches_per_epoch, multiplier=multiplier)
return super(BaseDataConstructor, self).__str__() + "\n" + summary
@staticmethod
def _count_batches(example_count, batch_size, batches_per_step):
"""Determine the number of batches, rounding up to fill all devices."""
x = (example_count + batch_size - 1) // batch_size
return (x + batches_per_step - 1) // batches_per_step * batches_per_step
def stop_loop(self):
self._stop_loop = True
def construct_lookup_variables(self):
"""Perform any one time pre-compute work."""
raise NotImplementedError
def lookup_negative_items(self, **kwargs):
"""Randomly sample negative items for given users."""
raise NotImplementedError
def _run(self):
atexit.register(self.stop_loop)
self._start_shuffle_iterator()
self.construct_lookup_variables()
self._construct_training_epoch()
self._construct_eval_epoch()
for _ in range(self._maximum_number_epochs - 1):
self._construct_training_epoch()
self.stop_loop()
def run(self):
try:
self._run()
except Exception as e:
# The Thread base class swallows stack traces, so unfortunately it is
# necessary to catch and re-raise to get debug output
traceback.print_exc()
self._fatal_exception = e
sys.stderr.flush()
raise
def _start_shuffle_iterator(self):
if self._shuffle_with_forkpool:
pool = popen_helper.get_forkpool(3, closing=False)
else:
pool = popen_helper.get_threadpool(1, closing=False)
atexit.register(pool.close)
args = [(self._elements_in_epoch, stat_utils.random_int32())
for _ in range(self._maximum_number_epochs)]
imap = pool.imap if self.deterministic else pool.imap_unordered
self._shuffle_iterator = imap(stat_utils.permutation, args)
def _get_training_batch(self, i):
"""Construct a single batch of training data.
Args:
i: The index of the batch. This is used when stream_files=True to assign
data to file shards.
"""
batch_indices = self._current_epoch_order[i * self.train_batch_size:
(i + 1) * self.train_batch_size]
(mask_start_index,) = batch_indices.shape
batch_ind_mod = np.mod(batch_indices, self._train_pos_count)
users = self._train_pos_users[batch_ind_mod]
negative_indices = np.greater_equal(batch_indices, self._train_pos_count)
negative_users = users[negative_indices]
negative_items = self.lookup_negative_items(negative_users=negative_users)
items = self._train_pos_items[batch_ind_mod]
items[negative_indices] = negative_items
labels = np.logical_not(negative_indices)
# Pad last partial batch
pad_length = self.train_batch_size - mask_start_index
if pad_length:
# We pad with arange rather than zeros because the network will still
# compute logits for padded examples, and padding with zeros would create
# a very "hot" embedding key which can have performance implications.
user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users
item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items
label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype)
users = np.concatenate([users, user_pad])
items = np.concatenate([items, item_pad])
labels = np.concatenate([labels, label_pad])
self._train_dataset.put(
i, {
movielens.USER_COLUMN:
np.reshape(users, (self.train_batch_size, 1)),
movielens.ITEM_COLUMN:
np.reshape(items, (self.train_batch_size, 1)),
rconst.MASK_START_INDEX:
np.array(mask_start_index, dtype=np.int32),
"labels":
np.reshape(labels, (self.train_batch_size, 1)),
})
def _wait_to_construct_train_epoch(self):
count = 0
while self._train_dataset.buffer_reached() and not self._stop_loop:
time.sleep(0.01)
count += 1
if count >= 100 and np.log10(count) == np.round(np.log10(count)):
logging.info(
"Waited {} times for training data to be consumed".format(count))
def _construct_training_epoch(self):
"""Loop to construct a batch of training data."""
if not self.create_data_offline:
self._wait_to_construct_train_epoch()
start_time = timeit.default_timer()
if self._stop_loop:
return
self._train_dataset.start_construction()
map_args = list(range(self.train_batches_per_epoch))
self._current_epoch_order = next(self._shuffle_iterator)
get_pool = (popen_helper.get_fauxpool if self.deterministic else
popen_helper.get_threadpool)
with get_pool(6) as pool:
pool.map(self._get_training_batch, map_args)
self._train_dataset.end_construction()
logging.info("Epoch construction complete. Time: {:.1f} seconds".format(
timeit.default_timer() - start_time))
@staticmethod
def _assemble_eval_batch(users, positive_items, negative_items,
users_per_batch):
"""Construct duplicate_mask and structure data accordingly.
The positive items should be last so that they lose ties. However, they
should not be masked out if the true eval positive happens to be
selected as a negative. So instead, the positive is placed in the first
position, and then switched with the last element after the duplicate
mask has been computed.
Args:
users: An array of users in a batch. (should be identical along axis 1)
positive_items: An array (batch_size x 1) of positive item indices.
negative_items: An array of negative item indices.
users_per_batch: How many users should be in the batch. This is passed
as an argument so that ncf_test.py can use this method.
Returns:
User, item, and duplicate_mask arrays.
"""
items = np.concatenate([positive_items, negative_items], axis=1)
# We pad the users and items here so that the duplicate mask calculation
# will include padding. The metric function relies on all padded elements
# except the positive being marked as duplicate to mask out padded points.
if users.shape[0] < users_per_batch:
pad_rows = users_per_batch - users.shape[0]
padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32)
users = np.concatenate([users, padding.astype(users.dtype)], axis=0)
items = np.concatenate([items, padding.astype(items.dtype)], axis=0)
duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(np.bool)
items[:, (0, -1)] = items[:, (-1, 0)]
duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)]
assert users.shape == items.shape == duplicate_mask.shape
return users, items, duplicate_mask
def _get_eval_batch(self, i):
"""Construct a single batch of evaluation data.
Args:
i: The index of the batch.
"""
low_index = i * self._eval_users_per_batch
high_index = (i + 1) * self._eval_users_per_batch
users = np.repeat(self._eval_pos_users[low_index:high_index, np.newaxis],
1 + rconst.NUM_EVAL_NEGATIVES, axis=1)
positive_items = self._eval_pos_items[low_index:high_index, np.newaxis]
negative_items = (self.lookup_negative_items(negative_users=users[:, :-1])
.reshape(-1, rconst.NUM_EVAL_NEGATIVES))
users, items, duplicate_mask = self._assemble_eval_batch(
users, positive_items, negative_items, self._eval_users_per_batch)
self._eval_dataset.put(
i, {
movielens.USER_COLUMN:
np.reshape(users.flatten(), (self.eval_batch_size, 1)),
movielens.ITEM_COLUMN:
np.reshape(items.flatten(), (self.eval_batch_size, 1)),
rconst.DUPLICATE_MASK:
np.reshape(duplicate_mask.flatten(), (self.eval_batch_size, 1)),
})
def _construct_eval_epoch(self):
"""Loop to construct data for evaluation."""
if self._stop_loop:
return
start_time = timeit.default_timer()
self._eval_dataset.start_construction()
map_args = [i for i in range(self.eval_batches_per_epoch)]
get_pool = (popen_helper.get_fauxpool if self.deterministic else
popen_helper.get_threadpool)
with get_pool(6) as pool:
pool.map(self._get_eval_batch, map_args)
self._eval_dataset.end_construction()
logging.info("Eval construction complete. Time: {:.1f} seconds".format(
timeit.default_timer() - start_time))
def make_input_fn(self, is_training):
# It isn't feasible to provide a foolproof check, so this is designed to
# catch most failures rather than provide an exhaustive guard.
if self._fatal_exception is not None:
raise ValueError("Fatal exception in the data production loop: {}"
.format(self._fatal_exception))
return (
self._train_dataset.make_input_fn(self.train_batch_size) if is_training
else self._eval_dataset.make_input_fn(self.eval_batch_size))
def increment_request_epoch(self):
self._train_dataset.increment_request_epoch()
class DummyConstructor(threading.Thread):
"""Class for running with synthetic data."""
def __init__(self, *args, **kwargs):
super(DummyConstructor, self).__init__(*args, **kwargs)
self.train_batches_per_epoch = rconst.SYNTHETIC_BATCHES_PER_EPOCH
self.eval_batches_per_epoch = rconst.SYNTHETIC_BATCHES_PER_EPOCH
def run(self):
pass
def stop_loop(self):
pass
def increment_request_epoch(self):
pass
@staticmethod
def make_input_fn(is_training):
"""Construct training input_fn that uses synthetic data."""
def input_fn(params):
"""Returns dummy input batches for training."""
# Estimator passes batch_size during training and eval_batch_size during
# eval. TPUEstimator only passes batch_size.
batch_size = (params["batch_size"] if is_training else
params.get("eval_batch_size") or params["batch_size"])
num_users = params["num_users"]
num_items = params["num_items"]
users = tf.random.uniform([batch_size, 1],
dtype=tf.int32,
minval=0,
maxval=num_users)
items = tf.random.uniform([batch_size, 1],
dtype=tf.int32,
minval=0,
maxval=num_items)
if is_training:
valid_point_mask = tf.cast(
tf.random.uniform([batch_size, 1],
dtype=tf.int32,
minval=0,
maxval=2), tf.bool)
labels = tf.cast(
tf.random.uniform([batch_size, 1],
dtype=tf.int32,
minval=0,
maxval=2), tf.bool)
data = {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
rconst.VALID_POINT_MASK: valid_point_mask,
}, labels
else:
dupe_mask = tf.cast(
tf.random.uniform([batch_size, 1],
dtype=tf.int32,
minval=0,
maxval=2), tf.bool)
data = {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
rconst.DUPLICATE_MASK: dupe_mask,
}
dataset = tf.data.Dataset.from_tensors(data).repeat(
rconst.SYNTHETIC_BATCHES_PER_EPOCH * params["batches_per_step"])
dataset = dataset.prefetch(32)
return dataset
return input_fn
class MaterializedDataConstructor(BaseDataConstructor):
"""Materialize a table of negative examples for fast negative generation.
This class creates a table (num_users x num_items) containing all of the
negative examples for each user. This table is conceptually ragged; that is to
say the items dimension will have a number of unused elements at the end equal
to the number of positive elements for a given user. For instance:
num_users = 3
num_items = 5
positives = [[1, 3], [0], [1, 2, 3, 4]]
will generate a negative table:
[
[0 2 4 int32max int32max],
[1 2 3 4 int32max],
[0 int32max int32max int32max int32max],
]
and a vector of per-user negative counts, which in this case would be:
[3, 4, 1]
When sampling negatives, integers are (nearly) uniformly selected from the
range [0, per_user_neg_count[user]) which gives a column_index, at which
point the negative can be selected as:
negative_table[user, column_index]
This technique will not scale; however MovieLens is small enough that even
a pre-compute which is quadratic in problem size will still fit in memory. A
more scalable lookup method is in the works.
"""
def __init__(self, *args, **kwargs):
super(MaterializedDataConstructor, self).__init__(*args, **kwargs)
self._negative_table = None
self._per_user_neg_count = None
def construct_lookup_variables(self):
# Materialize negatives for fast lookup sampling.
start_time = timeit.default_timer()
inner_bounds = np.argwhere(self._train_pos_users[1:] -
self._train_pos_users[:-1])[:, 0] + 1
(upper_bound,) = self._train_pos_users.shape
index_bounds = [0] + inner_bounds.tolist() + [upper_bound]
self._negative_table = np.zeros(shape=(self._num_users, self._num_items),
dtype=rconst.ITEM_DTYPE)
# Set the table to the max value to make sure the embedding lookup will fail
# if we go out of bounds, rather than just overloading item zero.
self._negative_table += np.iinfo(rconst.ITEM_DTYPE).max
assert self._num_items < np.iinfo(rconst.ITEM_DTYPE).max
# Reuse arange during generation. np.delete will make a copy.
full_set = np.arange(self._num_items, dtype=rconst.ITEM_DTYPE)
self._per_user_neg_count = np.zeros(
shape=(self._num_users,), dtype=np.int32)
# Threading does not improve this loop. For some reason, the np.delete
# call does not parallelize well. Multiprocessing incurs too much
# serialization overhead to be worthwhile.
for i in range(self._num_users):
positives = self._train_pos_items[index_bounds[i]:index_bounds[i+1]]
negatives = np.delete(full_set, positives)
self._per_user_neg_count[i] = self._num_items - positives.shape[0]
self._negative_table[i, :self._per_user_neg_count[i]] = negatives
logging.info("Negative sample table built. Time: {:.1f} seconds".format(
timeit.default_timer() - start_time))
def lookup_negative_items(self, negative_users, **kwargs):
negative_item_choice = stat_utils.very_slightly_biased_randint(
self._per_user_neg_count[negative_users])
return self._negative_table[negative_users, negative_item_choice]
class BisectionDataConstructor(BaseDataConstructor):
  """Use bisection to index within positive examples.

  This class tallies the number of negative items which appear before each
  positive item for a user. This means that in order to select the ith negative
  item for a user, it only needs to determine which two positive items bound
  it at which point the item id for the ith negative is a simply algebraic
  expression.
  """
  def __init__(self, *args, **kwargs):
    super(BisectionDataConstructor, self).__init__(*args, **kwargs)
    # All three are populated lazily by construct_lookup_variables().
    self.index_bounds = None
    self._sorted_train_pos_items = None
    self._total_negatives = None
  def _index_segment(self, user):
    """Return the cumulative negative-item tally for one user's positives."""
    lower, upper = self.index_bounds[user:user+2]
    items = self._sorted_train_pos_items[lower:upper]
    # Gap before the first positive, then gaps between consecutive positives.
    negatives_since_last_positive = np.concatenate(
      [items[0][np.newaxis], items[1:] - items[:-1] - 1])
    return np.cumsum(negatives_since_last_positive)
  def construct_lookup_variables(self):
    """Build per-user index bounds, sorted positives and negative tallies."""
    start_time = timeit.default_timer()
    # Boundaries where the (sorted) user id column changes value.
    inner_bounds = np.argwhere(self._train_pos_users[1:] -
                               self._train_pos_users[:-1])[:, 0] + 1
    (upper_bound,) = self._train_pos_users.shape
    self.index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])
    # Later logic will assume that the users are in sequential ascending order.
    assert np.array_equal(self._train_pos_users[self.index_bounds[:-1]],
                          np.arange(self._num_users))
    self._sorted_train_pos_items = self._train_pos_items.copy()
    # Sort each user's positive items in place, segment by segment.
    for i in range(self._num_users):
      lower, upper = self.index_bounds[i:i+2]
      self._sorted_train_pos_items[lower:upper].sort()
    self._total_negatives = np.concatenate([
        self._index_segment(i) for i in range(self._num_users)])
    logging.info("Negative total vector built. Time: {:.1f} seconds".format(
        timeit.default_timer() - start_time))
  def lookup_negative_items(self, negative_users, **kwargs):
    """Vectorized sampling of one negative item id per entry of `negative_users`."""
    # Initialize to -1 so the trailing assert catches any unassigned slot.
    output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1
    left_index = self.index_bounds[negative_users]
    right_index = self.index_bounds[negative_users + 1] - 1
    num_positives = right_index - left_index + 1
    num_negatives = self._num_items - num_positives
    neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives)
    # Shortcuts:
    # For points where the negative is greater than or equal to the tally before
    # the last positive point there is no need to bisect. Instead the item id
    # corresponding to the negative item choice is simply:
    #   last_postive_index + 1 + (neg_choice - last_negative_tally)
    # Similarly, if the selection is less than the tally at the first positive
    # then the item_id is simply the selection.
    #
    # Because MovieLens organizes popular movies into low integers (which is
    # preserved through the preprocessing), the first shortcut is very
    # efficient, allowing ~60% of samples to bypass the bisection. For the same
    # reason, the second shortcut is rarely triggered (<0.02%) and is therefore
    # not worth implementing.
    use_shortcut = neg_item_choice >= self._total_negatives[right_index]
    output[use_shortcut] = (
        self._sorted_train_pos_items[right_index] + 1 +
        (neg_item_choice - self._total_negatives[right_index])
    )[use_shortcut]
    if np.all(use_shortcut):
      # The bisection code is ill-posed when there are no elements.
      return output
    # Restrict the remaining work to the entries that still need bisection.
    not_use_shortcut = np.logical_not(use_shortcut)
    left_index = left_index[not_use_shortcut]
    right_index = right_index[not_use_shortcut]
    neg_item_choice = neg_item_choice[not_use_shortcut]
    # log2 of the widest segment bounds the number of halving passes needed.
    num_loops = np.max(
        np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32))
    for i in range(num_loops):
      mid_index = (left_index + right_index) // 2
      right_criteria = self._total_negatives[mid_index] > neg_item_choice
      left_criteria = np.logical_not(right_criteria)
      right_index[right_criteria] = mid_index[right_criteria]
      left_index[left_criteria] = mid_index[left_criteria]
    # Expected state after bisection pass:
    #   The right index is the smallest index whose tally is greater than the
    #   negative item choice index.
    assert np.all((right_index - left_index) <= 1)
    output[not_use_shortcut] = (
        self._sorted_train_pos_items[right_index] -
        (self._total_negatives[right_index] - neg_item_choice)
    )
    assert np.all(output >= 0)
    return output
def get_constructor(name):
  """Look up a data constructor class by its registered `name`.

  Args:
    name: Either "bisection" or "materialized".

  Returns:
    The matching BaseDataConstructor subclass.

  Raises:
    ValueError: If `name` does not match a known constructor.
  """
  if name == "materialized":
    return MaterializedDataConstructor
  if name == "bisection":
    return BisectionDataConstructor
  raise ValueError("Unrecognized constructor: {}".format(name))
| 38.723493
| 82
| 0.679963
|
139f1a357b258158eb71195d5eb8a97e707a810f
| 20,322
|
py
|
Python
|
shot_chart/core.py
|
theccalderon/shot_chart
|
97e4bd5da920bb894059c648d93e73743887d6e7
|
[
"Apache-2.0"
] | 16
|
2020-04-14T11:48:14.000Z
|
2020-12-16T03:32:48.000Z
|
shot_chart/core.py
|
theccalderon/shot_chart
|
97e4bd5da920bb894059c648d93e73743887d6e7
|
[
"Apache-2.0"
] | 4
|
2020-03-31T11:13:26.000Z
|
2021-05-20T12:45:17.000Z
|
shot_chart/core.py
|
theccalderon/shot_chart
|
97e4bd5da920bb894059c648d93e73743887d6e7
|
[
"Apache-2.0"
] | 8
|
2020-10-14T15:17:04.000Z
|
2021-12-21T12:03:42.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['Config', 'URLs', 'download_url', 'download_data', 'file_extract', 'untar_data', 'make_df', 'delegates',
'Shots', 'list_teams', 'list_game_ids', 'TeamShots', 'list_team_players', 'PlayerShots']
# Cell
import hashlib
import inspect
import os
import shutil
import tarfile
import zipfile
from pathlib import Path
from typing import Any, AnyStr, Callable, Collection, Dict, Hashable, Iterator, List, Mapping, NewType, Optional
from typing import Sequence, Tuple, TypeVar, Union

import boto3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import yaml
from fastprogress.fastprogress import master_bar, progress_bar
# Cell
class Config:
    """Persistent user configuration stored as YAML in `~/.shot_chart/config.yml`.

    The location can be overridden with the SHOTCHART_HOME environment
    variable.  Values are created with defaults on first use and re-created
    whenever the stored `version` does not match.
    """
    # Evaluated once at class-definition time; honors SHOTCHART_HOME.
    config_path = Path(os.getenv('SHOTCHART_HOME', '~/.shot_chart')).expanduser()
    config_file = config_path/'config.yml'
    def __init__(self):
        # Ensure the directory and a valid config file exist, then cache it.
        self.config_path.mkdir(parents=True, exist_ok=True)
        if not self.config_file.exists(): self.create_config()
        self.d = self.load_config()
    def __getitem__(self,k):
        # Keys are case-insensitive; bare names fall back to `<name>_path`.
        k = k.lower()
        if k not in self.d: k = k+'_path'
        # Numeric display settings are returned raw; everything else as a Path.
        if k in ["my_dpi","fig_height","fig_width"]:
            return self.d[k]
        return Path(self.d[k])
    def __getattr__(self,k):
        # Guard: during unpickling/copy `d` may not exist yet; raising
        # AttributeError here avoids infinite recursion through __getitem__.
        if k=='d': raise AttributeError
        return self[k]
    def __setitem__(self,k,v): self.d[k] = str(v)
    def __contains__(self,k): return k in self.d
    def load_config(self):
        """Load the YAML config; rebuild and retry if the version is stale."""
        with open(self.config_file, 'r') as f:
            config = yaml.safe_load(f)
        if 'version' in config and config['version'] == 1: return config
        self.create_config()
        return self.load_config()
    def create_config(self):
        """Write a fresh config file populated with default values."""
        F = plt.gcf()
        # Now check everything with the defaults:
        DPI = F.get_dpi()
        config = {'data_path': str(self.config_path/'data'),
                'archive_path': str(self.config_path/'archive'),
                'version': 1,
                'my_dpi': DPI,
                'fig_height': 472,
                'fig_width': 500
                }
        self.save_file(config)
    def save(self): self.save_file(self.d)
    def save_file(self, config):
        """Serialize `config` to the YAML config file."""
        with self.config_file.open('w') as f: yaml.dump(config, f, default_flow_style=False)
# Cell
class URLs():
    "Global constants for dataset and model URLs."
    LOCAL_PATH = Path.cwd()
    S3 = 'https://nba-shot-charts.s3.amazonaws.com/'
    SHOTS_2000 = f'{S3}shots-2000.tgz'
    SHOTS_2001 = f'{S3}shots-2001.tgz'
    SHOTS_2002 = f'{S3}shots-2002.tgz'
    SHOTS_2003 = f'{S3}shots-2003.tgz'
    SHOTS_2004 = f'{S3}shots-2004.tgz'
    SHOTS_2005 = f'{S3}shots-2005.tgz'
    SHOTS_2006 = f'{S3}shots-2006.tgz'
    SHOTS_2007 = f'{S3}shots-2007.tgz'
    SHOTS_2008 = f'{S3}shots-2008.tgz'
    SHOTS_2009 = f'{S3}shots-2009.tgz'
    SHOTS_2010 = f'{S3}shots-2010.tgz'
    SHOTS_2011 = f'{S3}shots-2011.tgz'
    SHOTS_2012 = f'{S3}shots-2012.tgz'
    SHOTS_2013 = f'{S3}shots-2013.tgz'
    SHOTS_2014 = f'{S3}shots-2014.tgz'
    SHOTS_2015 = f'{S3}shots-2015.tgz'
    SHOTS_2016 = f'{S3}shots-2016.tgz'
    SHOTS_2017 = f'{S3}shots-2017.tgz'
    SHOTS_2018 = f'{S3}shots-2018.tgz'
    SHOTS_2019 = f'{S3}shots-2019.tgz'
    SHOTS_2020 = f'{S3}shots-2020.tgz'
    @staticmethod
    def path(url, c_key='archive'):
        """Return the local cache path for `url`.

        Prefers an already-downloaded file under LOCAL_PATH; otherwise
        resolves into the Config-managed folder selected by `c_key`
        ('data' or 'archive').  Declared as a staticmethod because it was
        previously a bare function in the class body, which broke instance
        calls (`URLs().path(url)` passed the instance as `url`).
        """
        fname = url.split('/')[-1]
        local_path = URLs.LOCAL_PATH/('data' if c_key=='data' else 'archive')/fname
        if local_path.exists(): return local_path
        return Config()[c_key]/fname
# Cell
def download_url(url, dest, overwrite=False, pbar=None, show_progress=True, chunk_size=1024*1024,
                 timeout=4, retries=5):
    """Download `url` to `dest` unless it exists and not `overwrite`.

    Streams the response in `chunk_size` pieces, showing a progress bar when
    the server reports a Content-Length.  On repeated connection failures it
    deliberately does not raise: it prints instructions for completing the
    download manually instead.
    """
    if os.path.exists(dest) and not overwrite: return
    s = requests.Session()
    s.mount('http://',requests.adapters.HTTPAdapter(max_retries=retries))
    # additional line to identify as a firefox browser, see fastai/#2438
    s.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0'})
    u = s.get(url, stream=True, timeout=timeout)
    # Missing or malformed Content-Length just disables the progress bar.
    # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
    try: file_size = int(u.headers["Content-Length"])
    except (KeyError, TypeError, ValueError): show_progress = False
    with open(dest, 'wb') as f:
        nbytes = 0
        if show_progress: pbar = progress_bar(range(file_size), leave=False, parent=pbar)
        try:
            if show_progress: pbar.update(0)
            for chunk in u.iter_content(chunk_size=chunk_size):
                nbytes += len(chunk)
                if show_progress: pbar.update(nbytes)
                f.write(chunk)
        except requests.exceptions.ConnectionError as e:
            fname = url.split('/')[-1]
            # `dest` may be a plain string; normalize before taking .parent.
            data_dir = Path(dest).parent
            print(f'\n Download of {url} has failed after {retries} retries\n'
                  f' Fix the download manually:\n'
                  f'$ mkdir -p {data_dir}\n'
                  f'$ cd {data_dir}\n'
                  f'$ wget -c {url}\n'
                  f'$ tar xf {fname}\n'
                  f' And re-run your code once the download is successful\n')
# Cell
def download_data(url, fname=None, c_key='archive', force_download=False):
    """Download `url` into `fname` (default: the URLs cache path) and return it."""
    target = Path(fname) if fname else Path(URLs.path(url, c_key=c_key))
    target.parent.mkdir(parents=True, exist_ok=True)
    if force_download or not target.exists():
        download_url(url, target, overwrite=force_download)
    return target
# Cell
def file_extract(fname, dest='.'):
    """Extract archive `fname` to `dest` using `tarfile` or `zipfile`.

    Raises Exception for unrecognized archive extensions.  Fixes two defects:
    `zipfile` was referenced without being imported (NameError on any .zip),
    and the archive handles were never closed.
    """
    fname = str(fname)
    if fname.endswith('gz'):
        with tarfile.open(fname, 'r:gz') as tf:
            tf.extractall(dest)
    elif fname.endswith('zip'):
        with zipfile.ZipFile(fname) as zf:
            zf.extractall(dest)
    else:
        raise Exception(f'Unrecognized archive: {fname}')
# Cell
def _get_check(url):
    """Return True when the local archive's MD5 differs from the S3 ETag.

    NOTE(review): assumes the S3 object was a single-part upload so its ETag
    is a plain MD5 digest — confirm for multipart uploads.
    """
    bucket = URLs.S3.split(".")[0].split("//")[1]
    key = url.split("/")[-1]
    s3_resp = boto3.client('s3').head_object(Bucket=bucket, Key=key)
    remote_etag = s3_resp['ETag'].strip('"')
    local_md5 = hashlib.md5(Path(URLs.path(url)).read_bytes()).hexdigest()
    return remote_etag != local_md5
# Cell
def untar_data(url, fname=None, dest=None, c_key='data', force_download=False, extract_func=file_extract):
    """Download `url` to `fname` if `dest` doesn't exist, and un-tgz to folder `dest`."""
    default_dest = URLs.path(url, c_key=c_key).with_suffix('.csv')
    if dest is None:
        dest = default_dest
    else:
        dest = Path(dest)/default_dest.name
    archive = Path(fname) if fname else Path(URLs.path(url))
    # A changed S3 checksum means the cached archive is stale.
    if archive.exists() and _get_check(url):
        print("A new version of this dataset is available, downloading...")
        force_download = True
    if force_download:
        for stale in (archive, dest):
            if stale.exists(): os.remove(stale)
    if not dest.exists():
        archive = download_data(url, fname=archive, c_key=c_key)
        extract_func(archive, dest.parent)
    return dest
# Cell
def make_df(path):
    """Read the CSV file at `path` into a pandas DataFrame."""
    dataframe = pd.read_csv(path)
    return dataframe
# Cell
def delegates(to=None, keep=False):
    """Decorator: replace `**kwargs` in a signature with params from `to`.

    When `to` is None the params are taken from the decorated class's base
    `__init__`.  Only parameters of `to` that have defaults and are not
    already present are merged in; `keep=True` retains the `**kwargs` slot.
    """
    def _f(f):
        if to is None:
            to_f, from_f = f.__base__.__init__, f.__init__
        else:
            to_f, from_f = to, f
        sig = inspect.signature(from_f)
        params = dict(sig.parameters)
        kwargs_param = params.pop('kwargs')
        inherited = {
            name: p for name, p in inspect.signature(to_f).parameters.items()
            if p.default != inspect.Parameter.empty and name not in params
        }
        params.update(inherited)
        if keep:
            params['kwargs'] = kwargs_param
        from_f.__signature__ = sig.replace(parameters=params.values())
        return f
    return _f
# Cell
class Shots:
    "Plots shot chart and most/least effective shots using `plot_shots` and `plot_effective`"
    def __init__(self, dataframe):
        # One row per shot.  Columns read by this class: 'team', 'shots_by',
        # 'outcome', 'attempt', 'distance', 'x', 'y', 'game_id', 'year',
        # 'month', 'day'.
        self.dataframe = dataframe
        # Pixel offsets that map the scraped page coordinates onto the
        # half-court image drawn in __plot_shot_chart.
        self.__X_MODIFIER = 10
        self.__Y_MODIFIER = 454
    @property
    def teams(self):
        # Unique team names in the data.
        return self.dataframe['team'].drop_duplicates()
    @property
    def players(self):
        # Unique shooter names in the data.
        return self.dataframe['shots_by'].drop_duplicates()
    @property
    def fg_pct(self):
        # Field-goal percentage over every shot in the dataframe.
        return self.__calculate_metric(self.dataframe,"fg")
    @property
    def efg_pct(self):
        # Effective field-goal percentage over every shot in the dataframe.
        return self.__calculate_metric(self.dataframe,"efg")
    def __calculate_metric(self, dataframe, metric="efg"):
        # FG% = made / attempts; eFG% adds a 0.5 bonus per made 3-pointer.
        # Both are rounded to two decimals; an empty frame yields 0 instead
        # of dividing by zero.
        if len(dataframe)==0:
            return 0
        if metric == "fg":
            return round(len(dataframe.loc[dataframe['outcome']=='made'])/len(dataframe),2)
        else:
            return round( (len(dataframe.loc[dataframe['outcome']=='made']) + 0.5 *len(dataframe.loc[(dataframe['outcome']=='made') & (dataframe['attempt']=='3-pointer')]))/len(dataframe),2)
    def __plot_shot_chart(self, dataframe, metric:str="efg",attempt:str="all", distance_limit:Union[int,tuple]=29):
        # Left subplot: shot locations over a half-court image.
        # `distance_limit` is either a max distance (int) or an inclusive
        # (min, max) tuple; `attempt` filters to "2-pointer"/"3-pointer".
        if type(distance_limit) == int:
            distances = [str(x)+"ft" for x in range(distance_limit+1)]
        else:
            distances = [str(x)+"ft" for x in range(distance_limit[0],distance_limit[1]+1)]
        plt.figure(figsize=(2 * Config().fig_height/Config().my_dpi, Config().fig_width/Config().my_dpi), dpi=Config().my_dpi)
        ax = plt.subplot(1, 2, 1)
        plt.title("Shot chart")
        # NOTE(review): court image is re-fetched over the network each call.
        img = plt.imread("http://d2p3bygnnzw9w3.cloudfront.net/req/1/images/bbr/nbahalfcourt.png")
        implot = plt.imshow(img, extent=[0,500,0,472])
        if attempt == "2-pointer":
            shots_df = dataframe.loc[(dataframe["attempt"]=="2-pointer") & (dataframe["distance"].isin(distances))]
        elif attempt == "3-pointer":
            shots_df = dataframe.loc[(dataframe["attempt"]=="3-pointer") & (dataframe["distance"].isin(distances))]
        else:
            # "all": no filtering — note `distance_limit` is ignored here.
            shots_df = dataframe
        if len(shots_df) > 200:
            # Too many points for a readable scatter: draw a log-binned
            # density hexbin instead.
            # print(len(shots_df))
            ys = shots_df['x'].apply(lambda x: x.split("px")[0]).to_list()
            ys = [self.__Y_MODIFIER - int(x) for x in ys]
            xs = shots_df['y'].apply(lambda x: int(x.split("px")[0])).to_list()
            xs = [x + self.__X_MODIFIER for x in xs]
            mycmap = plt.cm.Reds
            mycmap._init()
            # Ramp the colormap alpha so sparse bins fade toward transparent.
            mycmap._lut[:,-1] = np.linspace(0, 0.8, 256+3)
            plt.hexbin(xs, ys, gridsize=(50,47), bins='log',cmap=mycmap)
            # cb = plt.colorbar(label='count in bin')
        else:
            # Scatter: green circles for makes, orange crosses for misses.
            made_df = shots_df.loc[shots_df['outcome']=="made"]
            missed_df = shots_df.loc[shots_df['outcome']=="missed"]
            ys = made_df['x'].apply(lambda x: x.split("px")[0]).to_list()
            ys = [self.__Y_MODIFIER - int(x) for x in ys]
            xs = made_df['y'].apply(lambda x: int(x.split("px")[0])).to_list()
            xs = [x + self.__X_MODIFIER for x in xs]
            # if most_or_least == 'most':
            plt.scatter(xs, ys,c='g',marker='o',s=10, alpha=1)
            # else:
            # plt.scatter(xs, ys,c='g',marker='o',s=10, alpha=0.2)
            ys = missed_df['x'].apply(lambda x: x.split("px")[0]).to_list()
            ys = [self.__Y_MODIFIER - int(x) for x in ys]
            xs = missed_df['y'].apply(lambda x: int(x.split("px")[0])).to_list()
            xs = [x + self.__X_MODIFIER for x in xs]
            # if most_or_least == 'least':
            # plt.scatter(xs, ys,c='g',marker='o',s=10, alpha=1)
            # else:
            plt.scatter(xs, ys,c='orange',marker='x',s=10, alpha=0.5)
            # plt.scatter(xs, ys,c='orange',marker='x',s=10)
        return
    def __plot_hist_volume(self, dataframe, fg_pct:float, efg_pct:float, most_or_least:str=None, final_distance:str=None, final_attempt:str=None,made:bool=True,missed:bool=True):
        # Right subplot: stacked histogram of made vs all shots by distance,
        # annotated with the supplied FG%/eFG% metrics.
        if made:
            ax = plt.subplot(1, 2, 2)
            plt.title("Shot distribution - all distances")
            distances_all_shots = dataframe['distance'].apply(lambda x: int(x.split('ft')[0])).to_list()
            make_shots = dataframe.loc[dataframe['outcome']=='made']['distance'].apply(lambda x: int(x.split('ft')[0])).to_list()
            shots_to_plot = [make_shots,distances_all_shots]
            plt.hist(shots_to_plot, bins = range( 0, max(distances_all_shots)+1, 1), align="left",stacked=True, label=['made','all'], color=['green', '#ff7f0e'])
            plt.legend(loc="upper center")
            if most_or_least and final_distance and final_attempt:
                ax.text(30 + 12, 1, most_or_least+" effective shot: "+str(final_distance)+"\n Attempt: "+final_attempt+"\n\nMetrics:\n FG%: "+str(fg_pct)+"\n eFG%: "+str(efg_pct), bbox=dict(facecolor='red', alpha=0.5))
            else:
                ax.text(30 + 12, 1, "Metrics:\n FG%: "+str(fg_pct)+"\n eFG%: "+str(efg_pct), bbox=dict(facecolor='red', alpha=0.5))
        return
    def list_game_ids(self,year,month,day):
        "Lists unique game ids in `dataframe` for a given date"
        return self.dataframe.loc[(self.dataframe['year']==year) & (self.dataframe['month']==month) & (self.dataframe['day']==day)][['game_id','winner','loser']].drop_duplicates()
    @delegates(__plot_shot_chart)
    def plot_shots(self,date_range:Union[str,tuple,int]="all",**kwargs):
        "Plots the shot chart for a given `date_range` including `made`, `missed` and `attempt` shots within `distances`"
        #use kwargs
        if date_range == "all":
            shots_df = self.dataframe
        elif type(date_range) == str:
            # Any other string is treated as a single game id.
            shots_df = self.dataframe.loc[self.dataframe['game_id']==date_range]
        elif type(date_range) == int:
            #means month
            shots_df = self.dataframe.loc[self.dataframe["month"]==date_range]
        else:
            # Tuple of (year, month, day) endpoints: slice via a DatetimeIndex.
            copy_df = self.dataframe.copy()
            copy_df['Timestamp'] = pd.to_datetime(copy_df.loc[:][['year','month','day']])
            # Create a DatetimeIndex and assign it to the dataframe.
            copy_df.index = pd.DatetimeIndex(copy_df.Timestamp)
            shots_df = copy_df.loc[str(str(date_range[0][0])+"-"+str(date_range[0][1])+"-"+str(date_range[0][2])):str(str(date_range[1][0])+"-"+str(date_range[1][1])+"-"+str(date_range[1][2]))]
        self.__plot_shot_chart(shots_df, **kwargs)
        # NOTE(review): the annotated metrics are computed on the FULL
        # dataframe, not the filtered `shots_df` — confirm this is intended.
        self.__plot_hist_volume(shots_df, self.__calculate_metric(self.dataframe, "fg"), self.__calculate_metric(self.dataframe, "efg"))
        plt.show()
    @delegates(__plot_shot_chart)
    def plot_effective(self, most_or_least="most",metric:str="efg", min_shots:Union[str,int]="none", exclude:Union[str,List["str"]]="none", **kwargs):
        "Plots the shot chart based on `most_or_least` considering a given `metric` for `date_range` including `made`, `missed` and `attempt` shots within `distances`. You can optionally `exclude` some shots. The `min_shots` option lets you filter based on a minimum ammount of shots taken per distance, auto == uniform distribution [0ft,29ft] as tracked by https://stats.nba.com/players/shooting/?sort=25-29%20ft.%20FGA&dir=1&Season=2019-20&SeasonType=Regular%20Season&CF=PLAYER_NAME*E*"
        # Candidate distances are limited to those actually present in the data.
        distances = ["0ft","1ft","2ft","3ft","4ft","5ft","6ft","7ft","8ft","9ft","10ft","11ft","12ft","13ft","14ft","15ft","16ft","17ft","18ft","19ft","20ft","21ft","22ft","23ft","24ft","25ft","26ft","27ft","28ft"]
        distances = set(distances).intersection(set(self.dataframe['distance'].drop_duplicates().to_list()))
        if type(exclude) == list:
            distances = list(set(distances) - set(exclude))
        #if auto, use uniform distro
        if min_shots == "none":
            new_df = self.dataframe
        elif min_shots == "auto":
            # "auto": keep only distances with at least a uniform share of shots.
            min_value = round(len(self.dataframe)/30,0) # [0ft, 29ft]
            new_df = pd.DataFrame(columns = self.dataframe.columns)
            for distance in distances:
                if len(self.dataframe.loc[self.dataframe['distance']==distance]) >= min_value:
                    new_df = new_df.append(self.dataframe.loc[self.dataframe['distance']==distance])
        else:
            # Explicit integer threshold per distance.
            min_value = min_shots
            new_df = pd.DataFrame(columns = self.dataframe.columns)
            for distance in distances:
                if len(self.dataframe.loc[self.dataframe['distance']==distance]) >= min_value:
                    new_df = new_df.append(self.dataframe.loc[self.dataframe['distance']==distance])
        # Scan every (attempt, distance) bucket for the best/worst metric.
        # NOTE(review): if no bucket has any shots, `final_attempt` is never
        # assigned and the lookup below raises UnboundLocalError — confirm
        # callers always pass data that populates at least one bucket.
        if most_or_least == "most":
            max_fg = -1
            max_efg = -1
            final_distance = ""
            for attempt in ["2-pointer", "3-pointer"]:
                for distance in distances:
                    player_df = new_df.loc[(new_df["distance"]==distance) & (new_df["attempt"]==attempt)]
                    if len(player_df) == 0:
                        continue
                    fg_pct = self.__calculate_metric(player_df, "fg")
                    efg_pct = self.__calculate_metric(player_df, "efg")
                    if metric == "fg":
                        if fg_pct > max_fg:
                            max_fg = fg_pct
                            max_efg = efg_pct
                            final_distance = distance
                            final_attempt = attempt
                    else:
                        if efg_pct > max_efg:
                            max_fg = fg_pct
                            max_efg = efg_pct
                            final_distance = distance
                            final_attempt = attempt
        else:
            # "least": start above 100% and keep the minimum instead.
            max_fg = 101
            max_efg = 101
            final_distance = ""
            for attempt in ["2-pointer", "3-pointer"]:
                for distance in distances:
                    player_df = new_df.loc[(new_df["distance"]==distance) & (new_df["attempt"]==attempt)]
                    if len(player_df) == 0:
                        continue
                    fg_pct = self.__calculate_metric(player_df, "fg")
                    efg_pct = self.__calculate_metric(player_df, "efg")
                    if metric == "fg":
                        if fg_pct < max_fg:
                            max_fg = fg_pct
                            max_efg = efg_pct
                            final_distance = distance
                            final_attempt = attempt
                    else:
                        if efg_pct < max_efg:
                            max_fg = fg_pct
                            max_efg = efg_pct
                            final_distance = distance
                            final_attempt = attempt
        player_df = new_df.loc[(new_df["distance"]==final_distance) & (new_df["attempt"] == final_attempt)]
        self.__plot_shot_chart(player_df, **kwargs)
        all_shots = self.dataframe
        self.__plot_hist_volume(all_shots, fg_pct=max_fg, efg_pct=max_efg, most_or_least=most_or_least, final_distance=final_distance, final_attempt=final_attempt)
        plt.show()
# Cell
def list_teams(dataframe):
    """Return the unique team names appearing in `dataframe` as a Series."""
    teams = dataframe['team']
    return teams.drop_duplicates()
# Cell
def list_game_ids(dataframe, year, month, day):
    "Lists unique game ids in `dataframe` for a given date"
    # Build the date mask once, then project and de-duplicate.
    same_day = (
        (dataframe['year'] == year)
        & (dataframe['month'] == month)
        & (dataframe['day'] == day)
    )
    return dataframe.loc[same_day, ['game_id', 'winner', 'loser']].drop_duplicates()
# Cell
class TeamShots(Shots):
    "Team shots"
    def __init__(self, dataframe, team):
        # Restrict the source data to this team's shots before delegating.
        team_df = dataframe.loc[dataframe['team'] == team].copy()
        self.team = team
        super().__init__(team_df)
# Cell
def list_team_players(dataframe, team):
    "Lists the players in `dataframe` who took shots for a `team`"
    team_shots = dataframe.loc[dataframe['team'] == team]
    per_player = team_shots.groupby('shots_by').shots_by.count()
    return per_player.reset_index(name='count').sort_values(['count'], ascending=False)
# Cell
class PlayerShots(Shots):
    "Player shots"
    def __init__(self, dataframe, player):
        # Record the shot total of the player's team (resolved from the
        # player's first row) before narrowing the data to the player.
        player_rows = dataframe.loc[dataframe['shots_by'] == player]
        team_name = player_rows['team'].to_list()[0]
        self.team_total_shots = len(dataframe.loc[dataframe['team'] == team_name])
        self.player = player
        super().__init__(player_rows.copy())
| 46.717241
| 488
| 0.600581
|
ff8e52c2a7ebf6a9b097e8dd37f7073af382f713
| 308
|
py
|
Python
|
python/graphscope/experimental/nx/tests/algorithms/forward/centrality/test_reaching.py
|
wenyuanyu/GraphScope
|
a40ccaf70557e608d8b091eb25ab04477f99ce21
|
[
"Apache-2.0"
] | 2
|
2020-12-15T08:42:10.000Z
|
2022-01-14T09:13:16.000Z
|
python/graphscope/experimental/nx/tests/algorithms/forward/centrality/test_reaching.py
|
wenyuanyu/GraphScope
|
a40ccaf70557e608d8b091eb25ab04477f99ce21
|
[
"Apache-2.0"
] | 1
|
2020-12-22T13:15:40.000Z
|
2020-12-22T13:15:40.000Z
|
python/graphscope/experimental/nx/tests/algorithms/forward/centrality/test_reaching.py
|
wenyuanyu/GraphScope
|
a40ccaf70557e608d8b091eb25ab04477f99ce21
|
[
"Apache-2.0"
] | 1
|
2021-11-23T03:40:43.000Z
|
2021-11-23T03:40:43.000Z
|
import networkx.algorithms.centrality.tests.test_reaching
import pytest
from graphscope.experimental.nx.utils.compat import import_as_graphscope_nx
# Presumably re-exports networkx's reaching-centrality test module under the
# graphscope.nx namespace, applying the "graphscope_session" fixture to every
# test — confirm against the compat module's implementation.
import_as_graphscope_nx(networkx.algorithms.centrality.tests.test_reaching,
                        decorators=pytest.mark.usefixtures("graphscope_session"))
| 38.5
| 81
| 0.811688
|
0942113b2c99f43b18d9c3e9632075ae208fc1fe
| 22,525
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations_async/_gallery_image_versions_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations_async/_gallery_image_versions_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations_async/_gallery_image_versions_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleryImageVersionsOperations:
"""GalleryImageVersionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_image_name: str,
        gallery_image_version_name: str,
        gallery_image_version: "models.GalleryImageVersion",
        **kwargs
    ) -> "models.GalleryImageVersion":
        """Issue the initial PUT of the create-or-update LRO and deserialize its first response.

        Autogenerated (AutoRest); edits may be lost on regeneration.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.GalleryImageVersion"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
            'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
            'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(gallery_image_version, 'GalleryImageVersion')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200/201/202 all carry a GalleryImageVersion body; deserialize each.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
        # `cls` lets callers intercept the raw pipeline response.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}  # type: ignore
    # NOTE(review): the annotated return type matches the generated docs but
    # the method actually returns an AsyncLROPoller wrapping the model.
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_image_name: str,
        gallery_image_version_name: str,
        gallery_image_version: "models.GalleryImageVersion",
        **kwargs
    ) -> "models.GalleryImageVersion":
        """Create or update a gallery Image Version.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Image Gallery in which the Image Definition
         resides.
        :type gallery_name: str
        :param gallery_image_name: The name of the gallery Image Definition in which the Image Version
         is to be created.
        :type gallery_image_name: str
        :param gallery_image_version_name: The name of the gallery Image Version to be created. Needs
         to follow semantic version name pattern: The allowed characters are digit and period. Digits
         must be within the range of a 32-bit integer. Format:
         :code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`.
        :type gallery_image_version_name: str
        :param gallery_image_version: Parameters supplied to the create or update gallery Image Version
         operation.
        :type gallery_image_version: ~azure.mgmt.compute.v2019_03_01.models.GalleryImageVersion
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: GalleryImageVersion, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_03_01.models.GalleryImageVersion
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.GalleryImageVersion"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial PUT, keeping the raw
            # pipeline response for the poller via the identity `cls`.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery_image_name=gallery_image_name,
                gallery_image_version_name=gallery_image_version_name,
                gallery_image_version=gallery_image_version,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call and must not reach
        # the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final-response deserializer handed to the poller.
            deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller rather than starting anew.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    gallery_name: str,
    gallery_image_name: str,
    gallery_image_version_name: str,
    expand: Optional[str] = "ReplicationStatus",
    **kwargs
) -> "models.GalleryImageVersion":
    """Retrieves information about a gallery Image Version.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param gallery_name: The name of the Shared Image Gallery in which the Image Definition
     resides.
    :type gallery_name: str
    :param gallery_image_name: The name of the gallery Image Definition in which the Image Version
     resides.
    :type gallery_image_name: str
    :param gallery_image_version_name: The name of the gallery Image Version to be retrieved.
    :type gallery_image_version_name: str
    :param expand: The expand expression to apply on the operation.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: GalleryImageVersion, or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2019_03_01.models.GalleryImageVersion
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.GalleryImageVersion"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-03-01"

    # Expand the operation's URL template with the encoded path pieces.
    path_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
        'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
        'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
    }
    url = self._client.format_url(self.get.metadata['url'], **path_args)

    # Query string: optional $expand first, then the mandatory api-version.
    query_parameters = {}  # type: Dict[str, Any]
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {'Accept': 'application/json'}  # type: Dict[str, Any]

    # Send the GET through the client pipeline and validate the answer.
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    gallery_name: str,
    gallery_image_name: str,
    gallery_image_version_name: str,
    **kwargs
) -> None:
    """Send the initial DELETE request of the delete long-running operation.

    Called by :meth:`begin_delete`; the poller built there tracks the
    operation to completion. Returns nothing (or ``cls(response, None, {})``
    when a custom ``cls`` callback was supplied).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-03-01"
    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
        'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
        'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers (DELETE sends no body and accepts no payload)
    header_parameters = {}  # type: Dict[str, Any]
    # Construct and send request
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # Accepted answers: 200 (OK), 202 (Accepted, async), 204 (No Content).
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    gallery_name: str,
    gallery_image_name: str,
    gallery_image_version_name: str,
    **kwargs
) -> "AsyncLROPoller[None]":
    """Delete a gallery Image Version.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param gallery_name: The name of the Shared Image Gallery in which the Image Definition
     resides.
    :type gallery_name: str
    :param gallery_image_name: The name of the gallery Image Definition in which the Image Version
     resides.
    :type gallery_image_name: str
    :param gallery_image_version_name: The name of the gallery Image Version to be deleted.
    :type gallery_image_version_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # FIX: the annotation/docstring used to claim a plain ``None`` return,
    # but this method always returns an AsyncLROPoller (see both return
    # statements below); they now describe the actual return value.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: fire the initial DELETE request now.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_image_name=gallery_image_name,
            gallery_image_version_name=gallery_image_version_name,
            cls=lambda x, y, z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial call; the polling method
    # must not see them again.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete has no response body; only forward the raw response
        # to a custom ``cls`` callback, if one was given.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = AsyncARMPolling(lro_delay, **kwargs)
    elif polling is False:
        polling_method = AsyncNoPolling()
    else:
        polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}  # type: ignore
def list_by_gallery_image(
    self,
    resource_group_name: str,
    gallery_name: str,
    gallery_image_name: str,
    **kwargs
) -> AsyncIterable["models.GalleryImageVersionList"]:
    """List gallery Image Versions in a gallery Image Definition.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param gallery_name: The name of the Shared Image Gallery in which the Image Definition
     resides.
    :type gallery_name: str
    :param gallery_image_name: The name of the Shared Image Gallery Image Definition from which the
     Image Versions are to be listed.
    :type gallery_image_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either GalleryImageVersionList or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_03_01.models.GalleryImageVersionList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.GalleryImageVersionList"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-03-01"

    def prepare_request(next_link=None):
        # First page: expand the URL template and add api-version.
        # Follow-up pages: the service returns a fully-formed next_link,
        # so no query parameters are added.
        if not next_link:
            # Construct URL
            url = self.list_by_gallery_image.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
                'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and split it into (next page link, items).
        deserialized = self._deserialize('GalleryImageVersionList', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a single page, raising on any non-200 answer.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_gallery_image.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions'}  # type: ignore
| 50.167038
| 247
| 0.677647
|
56fcb70df1f09764611db868fdd757d4f7135e38
| 78,531
|
py
|
Python
|
FIDL/decompiler_utils.py
|
mrj0n3s/FIDL
|
e6ceb000cda43b450717eb171309c02dee06dd4f
|
[
"MIT"
] | null | null | null |
FIDL/decompiler_utils.py
|
mrj0n3s/FIDL
|
e6ceb000cda43b450717eb171309c02dee06dd4f
|
[
"MIT"
] | null | null | null |
FIDL/decompiler_utils.py
|
mrj0n3s/FIDL
|
e6ceb000cda43b450717eb171309c02dee06dd4f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# ===========================================================================
# Decompiler utils
#
# This is a set of utilities wrapping the decompiler API into something
# sane. This code focus on vulnerability research and bug hunting,
# however most of the functionality is generic enough to be used for
# broader reverse engineering purposes.
#
# Carlos Garcia Prado
# <carlos.garcia@fireeye.com>
# ===========================================================================
__version__ = '1.2'
from idc import *
from idaapi import *
from idautils import *
import ida_hexrays
from FIDL.compiler_consts import expr_condition
from FIDL.compiler_consts import expr_ctype # To pretty print debug messages
from FIDL.compiler_consts import expr_final, expr_assignments, insn_conditions
import os
import random
import traceback
import networkx as nx
from collections import namedtuple, defaultdict, OrderedDict
from six.moves import xrange
DEBUG = False
# ===========================================================
# Auxiliary functions
# ===========================================================
def dprint(s=""):
    """Print *s* only when the module-level ``DEBUG`` flag is set.

    :param s: The debug message
    :type s: str, optional
    """
    if not DEBUG:
        return
    print(s)
# networkx expects nodes to be hashable. We monkey patch some of IDA's type to
# implement the __hash__ method so they can be used as nodes.
# Hashing on ``obj_id`` makes distinct ctree items hash distinctly while
# keeping the hash stable for the lifetime of the item.
_hash_from_obj_id = lambda self: hash(self.obj_id)
ida_hexrays.cexpr_t.__hash__ = _hash_from_obj_id
ida_hexrays.cinsn_t.__hash__ = _hash_from_obj_id
ida_hexrays.carg_t.__hash__ = _hash_from_obj_id
def debug_get_break_statements(c):
    """Return ``[c.ea]`` when the decompiled function contains a ``break``
    statement, ``[0]`` otherwise (debugging helper).
    """
    for node in c.g.nodes():
        expr = get_expr(node)
        # The node itself may be the break...
        if expr.op == cit_break:
            return [c.ea]
        # ...or it may be buried inside one of its sub-expressions.
        if any(sub.op == cit_break for sub in blowup_expression(expr)):
            return [c.ea]
    return [0]
def debug_stahp():
    """Toggles ``DEBUG`` value, useful for testing
    """
    global DEBUG
    DEBUG = False if DEBUG else True
def NonLibFunctions(start_ea=None, min_size=0):
    """Generator yielding only non-lib functions

    :param start_ea: Address to start looking for non-library functions.
    :type start_ea: int, optional
    :param min_size: Minimum function size. Useful to filter small, \
    uninteresting functions.
    :type min_size: int, optional
    """
    for f_ea in Functions(start=start_ea):
        # Skip library code and thunks.
        flags = get_func_attr(f_ea, FUNCATTR_FLAGS)
        if flags & (FUNC_LIB | FUNC_THUNK):
            continue
        # Skip functions below the size threshold.
        if get_func(f_ea).size() < min_size:
            continue
        yield f_ea
def do_for_all_funcs(func, fast=True, start_ea=None, blacklist=None,
                     min_size=100, **kwargs):
    """This is a generic wrapper for all kinds of logic that we want to apply \
    to all the functions in the binary.

    :param func: function "pointer" performing the analysis. \
    Its only mandatory argument is a :class:`controlFlowinator` object.
    :type func: function
    :param fast: parameter fast for the :class:`controlFlowinator` object.
    :type fast: boolean, optional
    :param start_ea: Address to start looking for non-library functions.
    :type start_ea: int, optional
    :param blacklist: a function determining whether to process a function.\
    Implemented via dependency injection.
    :type blacklist: function, optional
    :param min_size: minimum function size to be processed (see \
    :func:`NonLibFunctions`).
    :type min_size: int, optional
    :param kwargs: extra keyword arguments forwarded verbatim to ``func``.
    :return: A list of JSON-like messages (individual function results)
    :rtype: list
    """
    results = []
    # Since this is pretty CPU intensive, the blacklist offers a mechanism
    # for skipping some functions. Resolve the predicate once, outside the
    # loop (it used to be rebuilt on every iteration); by default nothing
    # is blacklisted.
    is_blacklisted = blacklist or (lambda f_ea: False)
    for f_ea in NonLibFunctions(start_ea=start_ea, min_size=min_size):
        if is_blacklisted(f_ea):
            continue
        try:
            c = controlFlowinator(ea=f_ea, fast=fast)  # CFG
        except Exception as e:
            # Decompilation can fail; log and move on to the next function.
            print(e)
            continue
        res = func(c, **kwargs)
        # Functions return a __list__ of JSON-like messages.
        # Consolidate all lists into one by using the
        # overloaded addition operator.
        if res:
            results += res
    return results
def my_get_func_name(ea):
    """Wrapper for :class:`get_func_name` handling some corner cases.

    :param ea: Address of the function to resolve its name
    :type ea: int
    """
    # Regular function name first.
    name = get_func_name(ea)
    if name:
        return name
    # Fallback: a function pointer stored in a global variable.
    name = get_name(ea)
    if name:
        return name
    # Last resort: maybe an import?
    # TODO: some kind of caching or memoization
    return cImporter().get_imports_info().get(ea, "")
class cImporter:
    """Collect import information.

    This is mainly to work around the fact that :func:``get_func_name`` does \
    not resolve imports...
    """

    def __init__(self):
        # {ea: import_name}, populated by :meth:`get_imports_info`.
        self.import_dict = {}

    def _imp_cb(self, ea, name, ord):
        # Callback handed to ``enum_import_names`` for every import entry.
        # Always returns True so the enumeration continues.
        if not name:
            print("Could not resolve import name!")
        else:
            self.import_dict[ea] = name
        return True

    def _find_imports_info(self):
        # Walk every import module and enumerate its named entries.
        for i in xrange(0, get_import_module_qty()):
            if get_import_module_name(i):
                enum_import_names(i, self._imp_cb)

    def get_imports_info(self):
        self._find_imports_info()
        return self.import_dict
class BBGraph(object):
    """Representation of the assembly CFG for a function
    """

    def __init__(self, f_ea):
        """:param f_ea: entry address of the function to model"""
        self.f_ea = f_ea
        # FC_PREDS asks IDA to compute predecessor links as well.
        self.f = FlowChart(get_func(f_ea), None, FC_PREDS)
        # Cached list of (startEA, endEA) tuples, one per basic block.
        self.bb_list = self._get_basic_blocks()

    def _get_basic_blocks(self):
        """List of tuples containing basic blocks limits"""
        return [(bb.startEA, bb.endEA) for bb in self.f]

    def _get_function_graph(self):
        """It creates a graph of basic blocks and their children.

        returns: dict { block_ea: [branch1_ea, branch2_ea], ... }
        """
        bb_dict = defaultdict(set)  # Dict of BasicBlock objects
        for bb in self.f:
            for child in bb.succs():
                bb_dict[bb.startEA].add(child.startEA)
        return bb_dict

    def _graph_to_networkx(self):
        """Gets a bb_dict (see _get_function_graph) and converts \
        this to a NetworkX format

        Returns None for an empty graph or when NetworkX is unavailable.
        """
        bb_dict = self._get_function_graph()
        if not bb_dict:
            return None
        try:
            dg = nx.DiGraph()
        except NameError as e:
            # NetworkX is not installed
            return None
        # Note: only edges are added, so blocks with no successors do not
        # appear as isolated nodes in the resulting graph.
        for node, children in bb_dict.items():
            for child in children:
                dg.add_edge(node, child)
        return dg

    def _get_block_from_ea(self, ea):
        """It returns the _BasicBlock_ containing ``ea`` or None
        """
        for bb in self.f:
            # Remember that bb.endEA is bb.startEA of the next one!
            if ea >= bb.startEA and ea < bb.endEA:
                return bb
        return None

    def get_node(self, addr):
        """Given a function's address, returns the basic block (address) that \
        contains it (or None)

        :param addr: address within a function
        :type addr: int
        :return: Address of the node containing the input address
        :rtype: int
        """
        node_addr = None
        for start, end in self.bb_list:
            if addr >= start and addr < end:
                # The address is within this basic block
                node_addr = start
                break
        return node_addr

    def find_connected_paths(self, bb_start, bb_end, co=10):
        """Leverages NetworkX to find all connected paths

        :param bb_start: Initial basic block
        :type bb_start: Basic block
        :param bb_end: Final basic block
        :type bb_end: Basic block
        :param co: Cutoff parameter
        :type co: int, optional

        NOTE: the cutoff parameter in :class:`nx.all_simple_paths` serves \
        two purposes:

        - reduce the chances of CPU melting (algo is O(n!))
        - nobody will inspect (manually) monstruous paths

        :returns: generator of lists or None
        """
        g = self._graph_to_networkx()
        if not g:
            return None
        # Sanity check.
        # Basic blocks within current function?
        bbl = [bb.startEA for bb in self.f]
        # bbl contains startEA's. However, we may have clicked
        # *somewhere* within the basic block, so normalize both
        # endpoints to their containing block's startEA first.
        _bb_start = self._get_block_from_ea(bb_start).startEA
        _bb_end = self._get_block_from_ea(bb_end).startEA
        if _bb_start in bbl and _bb_end in bbl:
            paths = nx.all_simple_paths(
                g,
                source=_bb_start,
                target=_bb_end,
                cutoff=co)
            return paths
        else:
            print('[!] find_connected_paths: check bb_start, bb_end parameters')
            return None
# ===========================================================
# Lighthouse utilities
# (from: https://github.com/gaasedelen/lighthouse)
# These help processing the displayed decompiled code
# For example: painting specific lines of code
# ===========================================================
def map_line2citem(decompilation_text):
    """Part of `Lighthouse plugin <https://github.com/gaasedelen/lighthouse>`_

    Map decompilation line numbers to citems.

    This function allows us to build a relationship between citems in the
    ctree and specific lines in the hexrays decompilation text.
    """
    # citem indexes are stored inline in the pseudocode text, hidden
    # behind COLOR_ADDR tokens; lex every line to recover them.
    return {
        line_number: lex_citem_indexes(decompilation_text[line_number].line)
        for line_number in xrange(decompilation_text.size())
    }
def map_line2node(cfunc, line2citem):
    """Part of `Lighthouse plugin <https://github.com/gaasedelen/lighthouse>`_

    Map decompilation line numbers to node (basic blocks) addresses.

    This function allows us to build a relationship between graph nodes
    (basic blocks) and specific lines in the hexrays decompilation text.
    """
    line2node = {}
    treeitems = cfunc.treeitems
    bb_graph = BBGraph(cfunc.entry_ea)

    # For every line of text, resolve the set of basic blocks that
    # generated the citems appearing on it.
    for line_number, citem_indexes in line2citem.items():
        nodes = set()
        for index in citem_indexes:
            # Resolve the citem to its code address. Stale indexes can
            # occur (seen on IDA 6.95), so tolerate them.
            try:
                address = treeitems[index].ea
            except IndexError:
                continue
            # Map the address back onto its containing basic block;
            # addresses with no owning node are silently skipped.
            node_addr = bb_graph.get_node(address)
            if node_addr:
                nodes.add(node_addr)
        line2node[line_number] = nodes

    return line2node
def lex_citem_indexes(line):
    """Part of `Lighthouse plugin <https://github.com/gaasedelen/lighthouse>`_

    Lex all ctree item indexes from a given line of text.

    The HexRays decompiler output contains invisible text tokens that can
    be used to attribute spans of text to the ctree items that produced them.

    :param line: one raw line of HexRays pseudocode (with color codes)
    :return: set of citem indexes found on the line
    """
    i = 0
    indexes = set([])
    line_length = len(line)
    # lex COLOR_ADDR tokens from the line of text
    while i < line_length:
        # does this character mark the start of a new COLOR_* token?
        if line[i] == COLOR_ON:
            # yes, so move past the COLOR_ON byte
            i += 1
            # is this sequence for a COLOR_ADDR?
            if ord(line[i]) == COLOR_ADDR:
                # yes, so move past the COLOR_ADDR byte
                i += 1
                #
                # A COLOR_ADDR token is followed by either 8, or 16 characters
                # (a hex encoded number) that represents an address/pointer.
                # in this context, it is actually the index number of a citem
                #
                citem_index = int(line[i:i + COLOR_ADDR_SIZE], 16)
                i += COLOR_ADDR_SIZE
                # SANITY CHECK
                # NOTE: this value is arbitrary (although reasonable)
                # FIX: get this from cfunc.treeitems.size()
                if citem_index < 0x1000:
                    # save the extracted citem index
                    indexes.add(citem_index)
                # skip to the next iteration as i has moved
                continue
        # nothing we care about happened, keep lexing forward
        i += 1
    # return all the citem indexes extracted from this line of text
    return indexes
def map_node2lines(line2node):
    """Part of `Lighthouse plugin <https://github.com/gaasedelen/lighthouse>`_

    Creates a mapping of nodes to lines of code (the inverse of
    ``line2node``).
    """
    node2lines = defaultdict(set)
    # Flatten the line -> nodes map into (node, line) pairs and regroup.
    pairs = ((node, line) for line, nodes in line2node.items() for node in nodes)
    for node_addr, line in pairs:
        node2lines[node_addr].add(line)
    return node2lines
def map_citem2line(line2citem):
    """Part of `Lighthouse plugin <https://github.com/gaasedelen/lighthouse>`_

    Creates a mapping of citem indexes to lines of code
    """
    # Later lines overwrite earlier ones on duplicate citem indexes,
    # matching the update semantics of the original loop.
    return {
        citem_idx: line
        for line, citem_l in line2citem.items()
        for citem_idx in citem_l
    }
def citem2higher(citem):
    """This gets the higher representation of a given :class:``citem``, \
    that is, a :class:``cinsn_t`` or :class:``cexpr_t``

    :param citem: a :class:``citem`` object
    :type citem: :class:``citem``
    """
    return citem.cexpr if citem.is_expr() else citem.cinsn
# ===========================================================
# Hex-rays hacks :)
# ===========================================================
class my_var_t:
    """This wraps the :class:`lvar_t` nicely into a more usable data structure.

    It aggregates several interesting pieces of information in one place. \
    eg. ``is_arg``, ``is_constrained``, ``is_initialized``, etc.

    The most commonly used attributes for this class are:

    - name
    - type_name
    - size
    - is_arg
    - is_pointer
    - is_array
    - is_signed

    :param var: an object representing a local variable or function argument
    :type var: :class:`lvar_t`
    """

    def __init__(self, var):
        self.var = var
        self.name = self.var.name
        # The type-related attributes below are filled in by
        # :meth:`_get_var_type` at the end of __init__.
        self.type_name = ''
        self.size = 0
        self.ti = None
        self.is_signed = False
        self.is_array = False
        self.array_type = None
        self.element_size = 0
        self.array_len = 0
        self.complex_type = None
        self.is_pointer = False
        self.pointed_type = None
        # Convenience
        self.is_arg = self.var.is_arg_var
        # Shit gets real here
        # NOTE(review): these flags are only initialized here; presumably
        # taint/constraint analysis elsewhere in the module sets them.
        self.is_tainted = False
        self.is_constrained = False
        self.is_initialized = False
        # Auxiliary for taint/constraint tracking
        # A specific variable might be a function
        # of others, ex: v1 = v2 * v3 + 1
        # is_a_function_of -> [v2, v3]
        self.is_a_function_of = []
        self._get_var_type()

    def _get_var_type(self):
        """Variable type information"""
        tif = self.var.type()  # tinfo_t
        # This is useful to call its `is_int`
        # and similar functions
        # Ex. `my_var_t.ti.is_int()`
        self.ti = tif
        # Interesting for integer types
        self.is_signed = tif.is_signed()
        # Something like `__int64`
        self.type_name = str(tif)
        # For an array this is the number of elements
        # Ex: char arr[1234] -> 1234
        self.size = tif.get_size()
        # This adds the type of the array
        # Ex: __int16 arr[123] -> __int16
        if tif.is_array():
            self.is_array = True
            self.array_type = tif.get_array_element()
            self.element_size = self.array_type.get_size()
            # Guard against a zero element size to avoid division by zero.
            if self.element_size:
                self.array_len = int(self.size / self.element_size)
        # Ex: char *str -> char (None if the type is not a pointer)
        if tif.is_ptr():
            self.is_pointer = True
            self.pointed_type = tif.get_pointed_object() or None
        # Ex: struct _SYSTEMTIME st -> _SYSTEMTIME
        if self.is_pointer:
            _tif = self.pointed_type
        else:
            _tif = tif
        self.complex_type = _tif.get_type_name() or ""

    def __repr__(self):
        # NOTE: prints a multi-line report as a side effect and returns ""
        # so that evaluating a my_var_t in the console shows only the report.
        print("Name: {}".format(self.name))
        print(" Type name: {}".format(self.type_name))
        print(" Size: {}".format(self.size))
        # Optional stuff (not all vars have this)
        if self.array_type:
            print(" Array type: {}".format(self.array_type))
            print(" Array element size: {}".format(self.element_size))
            print(" Array length: {}".format(self.array_len))
        if self.complex_type:
            print(" Complex type: {}".format(self.complex_type))
        # At a first glance, this may seem odd. It is correct.
        pointed_type_s = "{}".format(self.pointed_type)
        if len(pointed_type_s) and pointed_type_s != 'None':
            print(" Pointed object: {}".format(pointed_type_s))
        return ""
def get_return_type(cf=None):
    """Hack to get the return value of a function.

    :param cf: the result of ``decompile()``
    :type cf: :class:`ida_hexrays.cfuncptr_t`
    :return: Type information for the return value
    :rtype: :class:`tinfo_t`
    :raises ValueError: if no decompilation object is given
    """
    if not cf:
        # Fail loudly with context instead of a bare ValueError.
        raise ValueError("get_return_type: a decompilation object (cf) is required")
    ty = cf.type  # tinfo_t (entire prototype)
    ti = ty.get_rettype()  # tinfo_t (return value)
    return ti
# ===========================================================
# Convenience functions
# ===========================================================
class pseudoViewer:
    """This wraps the :class:`pseudoViewer` API neatly.

    We need it because some things don't work unless \
    you previously visited (or are currently visiting) \
    the function whose decompiled form you want to analyze. \
    Thus, we are forced to "Hack like in the movies"

    TODO: probably deprecate this after IDA 7.5 changes
    NOTE: the performance penalty is negligible
    """
    # OPF_REUSE: recycle an existing pseudocode view;
    # OPF_NO_WAIT: do not block waiting for the decompilation.
    silent_flags = ida_hexrays.OPF_REUSE | ida_hexrays.OPF_NO_WAIT

    def __init__(self):
        # vdui: the pseudocode view object; p_twidget: its containing widget.
        self.vdui = None
        self.p_twidget = None

    def show(self, ea=0, flags=silent_flags):
        """Displays the pseudoviewer widget

        :param ea: adress of the function to display
        :type ea: int, optional
        :param flags: how to flags an existing pseudocode display, if any
        :type flags: int, optional
        """
        try:
            self.vdui = open_pseudocode(ea, flags)
            if self.vdui:
                self.p_twidget = self.vdui.ct
        except Exception as e:
            # open_pseudocode can fail (e.g. invalid address); report and
            # leave the previous state untouched.
            print("ERROR (pseudoViewer.show) @ {:#08x}".format(ea))
            print(e)

    def close(self):
        """Closes the pseudoviewer widget
        """
        close_pseudocode(self.p_twidget)
def get_function_vars(c=None, ea=0, only_args=False, only_locals=False):
    """Populates a dict of :class:`my_var_t` for the function
    containing the specified ``ea``

    :param c: a :class:`controlFlowinator` object, optional
    :type c: :class:`controlFlowinator`
    :param ea: the function address
    :type ea: int
    :param only_args: extract only function arguments
    :type only_args: bool, optional
    :param only_locals: extract only local variables
    :type only_locals: bool, optional
    :return: A dictionary of :class:`my_var_t`, indexed by their index
    """
    if not c:
        # No flowinator given: decompile the function at ``ea`` ourselves.
        try:
            cf = my_decompile(ea=ea)
        except DecompilationFailure as e:
            print("ERROR (get_function_vars)")
            print(repr(str(e)))
            return {}
    else:
        cf = c.cf
    # Successful decompilation at `ea` point
    # `cf.lvars` is an array of `lvars_t`
    # `idx` is the index into this array to be used later
    #
    # I need to re-order the list of arguments.
    # No idea why, but IDA does not spit the arguments in order.
    # It keeps however a list of how the indexes are messed up in ``c.cf.argidx``
    ordered_vars = [None] * len(cf.lvars)
    for i, v in enumerate(cf.lvars):
        if v.is_arg_var:
            # Need to fix order
            idx = cf.argidx[i]
        else:
            # Local vars seem to be fine
            idx = i
        ordered_vars[idx] = v
    # Unnamed slots are filtered out in the only_args/only_locals variants
    # but kept in the full dictionary.
    if only_args:
        return OrderedDict({idx: my_var_t(v) for idx, v in enumerate(ordered_vars)
                            if v.is_arg_var and v.name})
    elif only_locals:
        return OrderedDict({idx: my_var_t(v) for idx, v in enumerate(ordered_vars)
                            if not v.is_arg_var and v.name})
    else:
        return OrderedDict({idx: my_var_t(v) for idx, v in enumerate(ordered_vars)})
def ref2var(ref, c=None, cf=None):
    """Convenient wrapper to streamline the conversions
    between ``var_ref_t`` and ``lvar_t``

    :param c: a :class:`controlFlowinator` object, optional
    :type c: :class:`controlFlowinator`
    :param cf: a decompilation object (usually the result of ``decompile``), optional
    :type cf: a :class:`cfunc_t` object
    :param ref: a reference to a variable in the pseudocode
    :type ref: :class:`var_ref_t`
    :return: a :class:`lvar_t` object
    :rtype: :class:`lvar_t`
    """
    # A decompilation object is needed; derive it from ``c`` when only
    # the flowinator was given.
    if not cf:
        if not c:
            raise RuntimeError('Need c or cf parameters. None was passed to ref2var')
        cf = c.cf
    return cf.lvars[ref.v.idx]
def is_arithmetic_expression(cex, only_these=None):
    """Checks whether this is an arithmetic expression.

    :param cex: expression, usually this is a *node*.
    :type cex: :class:`cexpr_t`
    :param only_these: a list of arithmetic expressions to look for. \
    These are defined in :mod:`ida_hexrays`. Defaults to add/mul/sub \
    when ``None`` or empty.
    :type only_these: a list of ``cot_*`` constants, eg. ``cot_add``.
    :return: True or False
    :rtype: bool
    """
    # FIX: the default used to be a mutable ``[]``; ``None`` avoids the
    # shared-mutable-default pitfall and behaves identically (both are
    # falsy, so ``only_these or arith_ops`` yields the same result).
    arith_ops = [cot_add, cot_mul, cot_sub]
    interesting_ops = only_these or arith_ops
    if cex.op in interesting_ops:
        return True
    # Let's go down only a level since most common expressions
    # are something like "v4 + 1", "v3 * 2 + 1", etc.
    _x = cex.x
    _y = cex.y
    for e in (_x, _y):
        # e: left / right hand side of the comparison
        if e and e.op in interesting_ops:
            return True
    return False
def is_binary_truncation(cex):
    """Looking for expressions truncating a number

    These expressions are of the form ``v1 & 0xFFFF`` or alike

    :param cex: an expression
    :type cex: :class:cexpr_t
    :return: True or False
    :rtype: bool
    """
    band_exprs = find_elements_of_type(cex, element_type=cot_band)
    if not band_exprs:
        return False
    # A truncation is a bitwise-and whose right-hand side is a constant.
    return any(is_number(band.y) for band in band_exprs)
# ============================================================
# The following wrappers are not rocket science
# but the code resulting from using them
# is __way__ more legible...
# ============================================================
def is_array_indexing(ins):
if ins.op == cot_idx:
return True
return False
def is_cast(ins):
    """True when ``ins`` is a cast expression."""
    return ins.op == cot_cast
def decast(ins):
    """Remove the ``cast``, returning the casted element
    """
    # Idempotent: non-cast expressions pass through untouched
    return ins.x if is_cast(ins) else ins
def is_asg(ins):
    """True for any assignment flavor (``=``, ``+=``, ``^=``, ...).

    The recognized opcodes are the keys of ``expr_assignments``.
    """
    # Idiom fix: membership is tested on the dict directly,
    # the ``.keys()`` call was redundant.
    return ins.op in expr_assignments
def is_call(ins):
    """True when ``ins`` is a function call expression."""
    return ins.op == cot_call
def is_helper(ins):
    """Helpers are IDA macros,
    e.g. __ROR__ or LOBYTE
    """
    return ins.op == cot_helper
def is_ref(ins):
    """True when ``ins`` is a reference expression (``&x``)."""
    return ins.op == cot_ref
def ref_to(ins):
    """Return the referenced element of ``&x``; idempotent otherwise."""
    return ins.x if ins.op == cot_ref else ins
def is_ptr(ins):
    """True when ``ins`` is a pointer dereference (``*x``)."""
    return ins.op == cot_ptr
def points_to(ins):
    """Return the pointed-to element of ``*x``; idempotent otherwise."""
    return ins.x if ins.op == cot_ptr else ins
def is_number(ins):
    """Convenience wrapper: True for numeric constant expressions."""
    return ins.op == cot_num
def num_value(ins):
    """Returns the numerical value of ``ins``

    :param ins: :class:`cexpr_t` or :class:`insn_t`
    :raises TypeError: if ``ins`` is not a numeric constant
    """
    if is_number(ins):
        return ins.n.value(ins.type)
    raise TypeError
def is_string(ins):
    """Convenience wrapper: True for C-string literal expressions."""
    return bool(ins.is_cstr())
def is_member_pointer(ins):
    """Convenience wrapper: True for ``ptr->member`` accesses."""
    return ins.op == cot_memptr
def is_struct_member(ins):
    """Convenience wrapper: True for ``struct.member`` accesses."""
    return ins.op == cot_memref
def member_info(ins):
    """Returns info about a structure member
    or a pointer to it

    :param ins: :class:`cexpr_t` or :class:`insn_t`
    :return: dict with the member's type name, offset, size and
        the index of the variable holding the struct
    :rtype: dict
    """
    return {
        'type_name': str(ins.type),
        'offset': ins.m,
        'size': ins.type.get_size(),
        'struct_var_idx': ins.x.v.idx,
    }
def string_value(ins):
    """Gets the string corresponding to ``ins``

    Works with *C-str* and *Unicode*

    :param ins: :class:`cexpr_t` or :class:`insn_t`
    :return: string for this ``ins``
    :rtype: string
    :raises TypeError: if ``ins`` is not a string literal
    """
    if not is_string(ins):
        raise TypeError
    ea = ins.obj_ea
    # The low nibble selects the string type (C / Unicode layout)
    stype = get_str_type(ea) & 0xF
    # Python 3: get_strlit_contents returns bytes
    return get_strlit_contents(ea, -1, stype).decode('utf-8')
def is_var(ins):
    """Whether this ``ins`` corresponds to a variable

    Remember that if this evaluates to True, we are dealing
    with an object of type ``var_ref_t`` which are pretty much
    useless. We may want to convert this to a ``lvar_t`` and
    even better to a :class:`my_var_t` afterwards.

    :func:`ref2var` is a simple wrapper to perform the conversion
    between reference and variable
    """
    return ins.op == cot_var
def is_global_var(ins):
    """Tells whether ``ins`` is a global variable

    TODO: enhance this heuristic

    :param ins: :class:`cexpr_t` or :class:`insn_t`
    :return: True or False
    :rtype: bool
    """
    # cot_obj covers both string literals and real globals:
    # keep only non-literal objects with a sane address
    return ins.op == cot_obj and not ins.is_cstr() and ins.obj_ea > 0
def value_of_global(ins):
    """Returns the value of a global variable

    :raises TypeError: if ``ins`` is not a global variable
    """
    if is_global_var(ins):
        return ins.obj_ea
    raise TypeError
def is_if(ins):
    """True when ``ins`` is an ``if`` statement."""
    return ins.op == cit_if
# ===========================================================
# Auxiliary
# ===========================================================
def my_decompile(ea=None):
    """This sets flags necessary to use this programmatically.

    :param ea: Address within the function to decompile
    :type ea: int
    :return: decompilation object
    :rtype: a :class:`cfunc_t`
    """
    if not ea:
        print("Please specify an address (ea)")
        return None
    # Do not pop the "please wait" dialog and always decompile fresh
    flags = ida_hexrays.DECOMP_NO_WAIT | ida_hexrays.DECOMP_NO_CACHE
    try:
        cf = decompile(ea=ea, flags=flags)
        cf.refresh_func_ctext()
        return cf
    except ida_hexrays.DecompilationFailure:
        print("Failed to decompile @ {:X}".format(ea))
        return None
def dump_pseudocode(ea=0):
    """Debugging helper.
    """
    print("~~~~ Dumping pseudocode ~~~~")
    cf = my_decompile(ea=ea)
    if not cf:
        print("Failed to decompile @ {:X}".format(ea))
        return None
    # One numbered line per pseudocode row, color tags stripped
    for idx, sline in enumerate(cf.pseudocode):
        print("[{}] {}".format(idx, tag_remove(sline.line)))
def dump_lvars(ea=0):
    """Debugging helper.
    """
    print("~~~~ Dumping local variables ~~~~")
    try:
        flow = controlFlowinator(ea=ea)
    except Exception:
        print("Failed to decompile {:X}".format(ea))
        return None
    print()
    print(flow.lvars)
def lines_and_code(cf=None, ea=0):
    """Mapping of line numbers and code

    :param cf: a decompilation object
    :type cf: an :class:`cfunc_t` object, optional
    :param ea: Address within the function to decompile, if no `cf` is provided
    :type ea: int, optional
    :return: a dictionary of lines of code, indexed by line number
    :rtype: dict
    """
    if not cf:
        cf = my_decompile(ea=ea)
    if not cf:
        # Decompilation failed: empty mapping
        return {}
    # Lines of code start at 1
    return {i + 1: tag_remove(s.line) for i, s in enumerate(cf.pseudocode)}
def all_paths_between(c, start_node=None, end_node=None, co=40):
    """Calculates all paths between ``start_node`` and ``end_node``

    Calculating paths is one of these things that \
    is better done with the parallel index graph (``c.i_cfg``) \
    It haywires when done with complex elements.

    FIXME: the co (cutoff) param is necessary to avoid complexity
    explosion. However, there is a problem if it's reached...

    :param c: a :class:`controlFlowinator` object
    :type c: :class:`controlFlowinator`
    :param start_node: a :class:`controlFlowinator` node (defaults to the
        node with the smallest index, i.e. the function entry)
    :type start_node: :class:`cexpr_t`
    :param end_node: a :class:`controlFlowinator` node (defaults to the
        node with the highest index)
    :type end_node: :class:`cexpr_t`
    :param co: the *cutoff* value controls the maximum path length.
    :type co: int, optional
    :return: it **yields** a list of nodes for each path
    :rtype: list
    """
    if not start_node:
        start_index = min(c.i_cfg.nodes)
    else:
        start_index = c.node2index[start_node]
    # Use the node with the highest index
    # as default value
    if not end_node:
        node_indexes = [n.index for n in c.g.nodes()]
        # higher value is usually
        # the last return node
        if node_indexes:
            end_index = max(node_indexes)
        else:
            end_index = start_index
    else:
        end_index = c.node2index[end_node]
    # Find all *simple* paths
    # These paths are lists of citem indexes
    try:
        all_paths_i = nx.all_simple_paths(
            c.i_cfg,
            source=start_index,
            target=end_index,
            cutoff=co)
    except nx.NodeNotFound as e:
        # TODO: this problem is related to
        # the cutoff. Figure out what happens...
        traceback.print_exc()
        all_paths_i = []
    # ------------------------------------------------
    # Translate the indexes to cinsn_t, cexpr_t nodes
    # before yielding back to the caller
    # ------------------------------------------------
    for p_i in all_paths_i:
        p_nodes = [c.index2node[x] for x in p_i]
        yield p_nodes
def display_node(c=None, node=None, color=None):
    """Displays a given node in the ``pseudoviewer``

    :param c: a :class:`controlFlowinator` object
    :type c: :class:`controlFlowinator`
    :param node: a :class:`controlFlowinator` node
    :type node: :class:`cexpr_t`
    :param color: color to mark the line of code corresponding to `node`
    :type color: int, optional
    """
    cf = c.cf
    pseudo = cf.pseudocode
    line2citem = map_line2citem(pseudo)
    citem2line = map_citem2line(line2citem)
    code = lines_and_code(cf=cf)
    if not code:
        print("Mapping between lines and code not available!")
        return
    if not color:
        color = random.randint(0x202020, 0x808080)
    try:
        line = citem2line[node.index]
    except KeyError:
        # BUGFIX: the original fell through here and then used the
        # undefined name ``line``, raising NameError right after
        # printing this message. Bail out instead.
        print("display_node :: Error @ citem2line")
        return
    print("{}: {}".format(line + 1, code[line + 1]))
    # Paint it!
    pseudo[line].bgcolor = color
    refresh_idaview_anyway()
def display_path(cf=None, path=None, color=None):
    """Shows a path's code and colors its lines.

    :param cf: a decompilation object (required)
    :type cf: an :class:`cfunc_t` object
    :param path: a list of ::class:`controlFlowinator` nodes
    :type path: list
    :param color: color to mark the lines of code corresponding to `path`
    :type color: int, optional
    :return: a list of function lines (path nodes)
    :rtype: list
    """
    # BUGFIX: the original tried ``my_decompile(ea=ea)`` here, but no
    # ``ea`` exists in this scope (NameError). There is nothing to
    # decompile from, so fail gracefully instead.
    if not cf:
        print("display_path: a decompilation object (cf) is required")
        return []
    path_lines = []
    pseudo = cf.pseudocode
    line2citem = map_line2citem(pseudo)
    citem2line = map_citem2line(line2citem)
    code = lines_and_code(cf=cf)
    if not code:
        print("Mapping between lines and code not available!")
        return []
    if not color:
        color = random.randint(0x202020, 0x808080)
    for node in path:
        try:
            line = citem2line[node.index]
        except KeyError:
            # Node has no associated pseudocode line; skip it
            continue
        print("{}: {}".format(line + 1, code[line + 1]))
        path_lines.append(line + 1)
        # Paint it!
        pseudo[line].bgcolor = color
    refresh_idaview_anyway()
    return path_lines
def display_line_at(ea, silent=False):
    """Displays the line of pseudocode corresponding to ``ea``

    This is useful to quickly answer questions like:

    - "Is this function always called with its first parameter being a constant?"
    - "I want to see all the error messages displayed by this function"
    - etc.

    :param ea: address of an element contained within the line to display
    :type ea: int
    :param silent: flag controlling verbose output
    :type silent: bool
    :return: a formatted line of pseudocode, or an error string
    :rtype: string
    """
    err_fmt = "{:X} FAIL: {}"
    try:
        c = controlFlowinator(ea=ea)
    except Exception:
        err = "Failed to decompile"
        return err_fmt.format(ea, err)
    pseudo = c.cf.pseudocode
    line2citem = map_line2citem(pseudo)
    citem2line = map_citem2line(line2citem)
    code = lines_and_code(cf=c.cf)
    if not code:
        print("Mapping between lines and code not available!")
    found = None
    for item in c.cf.treeitems:
        # `node` is a `cexpr_t` that may contain the call within
        # a more complex expression. Need to peek inside
        if item.ea == ea:
            found = True
            break
    if not found:
        # BUGFIX: message read "Could find that address"
        err = "Could not find that address"
        return err_fmt.format(ea, err)
    try:
        line = citem2line[item.index]
        line_info = "{:X} {}: {}".format(ea, line + 1, code[line + 1])
        if not silent:
            print(line_info)
        return line_info
    except KeyError as e:
        # BUGFIX: ``err = "KeyError:", e`` built a tuple, so the
        # returned "string" was actually a mixed-type mess
        err = "KeyError: {}".format(e)
        return err_fmt.format(ea, err)
def display_all_calls_to(func_name):
    """Wrapping :func:`display_line_at` since this is the most common use of this API

    :param func_name: name of the function to search references
    :type func_name: string
    """
    f_ea = get_name_ea_simple(func_name)
    if f_ea == BADADDR:
        print("Can not find {}".format(func_name))
        return None
    # We'll save the results and display all at the end,
    # otherwise the output gets clobbered with IDA's logging
    lines = [display_line_at(xref.frm, silent=True)
             for xref in XrefsTo(f_ea, True)
             if xref.iscode]
    print("==================================================")
    print("= All calls to {}".format(func_name))
    print("==================================================")
    for line in lines:
        print("- {}".format(line))
# ===========================================================
# CFG reconstruction
# ===========================================================
class controlFlowinator:
    """This is the main object of FIDL's API.

    It finds all decompiled code "blocks" and recreates a CFG based
    on this information.
    This gives us the best of both worlds: the possibility to
    analyze a graph (like in disassembly mode) and the power
    of :class:``citem`` based analysis.

    Some analysis is performed *after* the CFG has been
    constructed. These are rather cost intensive, so they are
    turned off by default. Use ``fast=False`` to apply these
    and get a better CFG.

    :param ea: address of the function to analyze
    :type ea: int
    :param fast: Set to ``False`` for an object with richer information
    :type fast: bool
    :raises RuntimeError: if decompilation fails
    """

    def __init__(self, ea=None, fast=True):
        self.cf = my_decompile(ea=ea)
        if not self.cf:
            raise RuntimeError("controlFlowinator: failed to decompile")
        self.ea = self.cf.entry_ea
        self.fast = fast
        self.g = None
        # Information about local variables within this function
        # This is a dict of `my_var_t` objects
        self.lvars = None
        # Information about _this_ function arguments
        # This is a dict of `my_var_t` objects
        self.args = None
        # The return type of this function
        self.ret_type = None
        # A map of graph nodes and their indexes
        self.index2node = {}
        # Convenient to have the reverse mapping as well
        self.node2index = {}
        # Superblock is the root
        superblock = self.cf.body.cblock
        self.i_cfg = self._get_root_cfg(superblock)
        blocks = self._get_blocks_to_expand(superblock)
        # Interim CFG of citem_t indexes
        # It is easier to create the initial graph
        # using indexes as nodes but convoluted to use
        self._generate_i_cfg(blocks_to_expand=blocks)
        # A `nx.Digraph` of `cinsn_t` or `cexpr_t`
        # It is easier to operate on this one,
        # it reduces API complexity
        self._generate_better_cfg()
        # =============================================================
        # The hybrid-CFG is ready now. We add some (optional) features
        # to this object, since they're useful in most cases
        # =============================================================
        # Information about calls within this function
        # This is a list of `callObj` objects
        self.calls = []
        # Retrieves a list of `callObj's`
        self._get_all_function_calls()
        if not fast:
            # Cost-intensive extras, opt-in via fast=False
            self.lvars = get_function_vars(c=self, only_locals=True)
            self.args = get_function_vars(c=self, only_args=True)
            self.ret_type = get_return_type(cf=self.cf)
def _e(self, index):
"""Syntactic sugar"""
try:
return self.cf.treeitems[index]
except Exception as e:
print(e)
def _stitch_together(self, nodes):
"""Stitches the cinsn_t in a block together
This is an auxiliary function since the
operation appears so commonly
(A, B, C, ..., X) => A->B->C->...->X
Returns a list of edges: [(A, B), (B, C), ...]
"""
edges = []
list_indexes = [x.index for x in nodes]
if len(list_indexes) < 2:
return []
for idx in xrange(len(list_indexes) - 1):
u = list_indexes[idx]
v = list_indexes[idx + 1]
edges.append((u, v))
return edges
def _get_root_cfg(self, super_block):
"""Creates the initial graph from the super_block
It contains all blocks at recursion level 0
Returns a `nx.Digraph`
"""
root_edges = self._stitch_together(super_block)
root = nx.DiGraph()
if root_edges:
root.add_edges_from(root_edges)
else:
# The function contains a single call
root_index = self.cf.body.index + 1
root.add_node(root_index)
return root
def _get_blocks_to_expand(self, node_list, succ=None):
"""Finds collapsed blocks to be expanded.
Returns a list of cinsn_t
"""
to_expand = []
complex_blocks = (cit_if, cit_do, cit_while,
cit_switch, cit_for, cit_goto)
for n in node_list:
if n.op in complex_blocks:
n.succ = succ
to_expand.append(n)
return to_expand
    def _get_block_successor(self, block):
        """Gets the successor for a given block

        FIXME: This has a problem with "void" function prototypes.
        The lack of _return_ node results in an absence of an implicit
        _ielse_ for the switch statements.
        Maybe introduce a dummy "ret" node in the CF?

        :param block: a collapsed block (if/do/while/switch/for/goto) citem
        :return: the index of the block's single successor in ``i_cfg``,
            or None if it has none (or lookup failed)
        """
        succs = None
        try:
            succs = list(self.i_cfg.successors(block.index))
            # A collapsed block is expected to have at most one successor
            if len(succs) > 1:
                print("More than one successor!")
                print("Check this out")
            if succs:
                succ = succs[0]
            else:
                succ = None
        except nx.NetworkXError as e:
            # block.index not present in the graph
            print("_get_block_successor: {}".format(e))
            succ = None
        if not succ:
            print("Block: {:#08x}".format(block.ea))
            print(" succs: {}".format(succs))
            print(" No successor! Take a look into this!")
        return succ
    def _expand_if_block(self, block):
        """Expands a given `if` block

        Splices the ``ithen`` (and optional ``ielse``) bodies into the
        interim CFG in place of the collapsed if-node's implicit edge.

        :return: newly discovered nested blocks that still need expansion
        :rtype: list
        """
        new_blocks = []
        iblock = {'ithen': block.cif.ithen}
        ielse = block.cif.ielse
        if ielse:
            iblock['ielse'] = ielse
        # 1. Find and save the original successor
        # NOTE: There may not be a successor?
        # ex: leaf branch
        succ = self._get_block_successor(block)
        # 2. If we have an ielse block, remove
        # the implicit ielse (the existing edge)
        if 'ielse' in iblock and succ:
            try:
                self.i_cfg.remove_edge(block.index, succ)
            except nx.NetworkXError as e:
                print(e)
        # 3. Add the edges to this node
        # (ithen and maybe ielse)
        for what, bl in iblock.items():
            branch = bl.cblock
            # Calculate new blocks to expand
            new_blocks += self._get_blocks_to_expand(branch, block.succ)
            # Stitch the instructions together
            if_edges = self._stitch_together(branch)
            if if_edges:
                self.i_cfg.add_edges_from(if_edges)
            # The cblock has one ins at least
            branch_ins = list(branch)
            # ``break`` inside a branch jumps straight to the enclosing
            # loop/switch successor (propagated via ``block.succ``)
            if block.succ:
                for node in branch_ins:
                    if node.op == cit_break:
                        self.i_cfg.add_edge(node.index, block.succ)
            first = branch_ins[0]
            last = branch_ins[-1]
            # Add the edge between the if ins and
            # the first cblock element
            self.i_cfg.add_edge(block.index, first.index)
            # Connect to the orphaned successor
            # Unless it is a return, etc.
            if succ:
                if last.op not in (cit_return, cit_break):
                    self.i_cfg.add_edge(last.index, succ)
        return new_blocks
    def _expand_switch_block(self, block):
        """Expands a given `switch` block

        Each case body is wired from the switch node; a trailing
        ``break`` citem is removed and re-routed to the successor.

        :return: newly discovered nested blocks that still need expansion
        :rtype: list
        """
        # 1. Find and save the original successor
        # NOTE: There may not be a successor?
        # ex: leaf branch
        new_blocks = []
        succ = self._get_block_successor(block)
        # Remove the original edge
        if succ:
            self.i_cfg.remove_edge(block.index, succ)
        switch = block.cswitch
        for case in switch.cases:
            case_block = case.cblock
            u = block
            case_ins = [i for i in case_block]
            new_blocks += self._get_blocks_to_expand(case_ins)
            # Chain: switch -> ins1 -> ins2 -> ...
            for v in case_ins:
                self.i_cfg.add_edge(u.index, v.index)
                u = v
            # If the last element is a 'break' we can throw
            # this out and connect the last meaningful
            # citem to the end of the switch
            # NOTE(review): assumes a break-terminated case has at
            # least two citems (case_ins[-2]) -- confirm
            if v.op == cit_break:
                self.i_cfg.remove_node(v.index)
                if succ:
                    # u -> break => u -> succ
                    self.i_cfg.add_edge(case_ins[-2].index, succ)
        return new_blocks
    def _expand_do_block(self, block):
        """Expand a given `do` block

        :return: newly discovered nested blocks that still need expansion
        :rtype: list
        """
        succ = self._get_block_successor(block)
        # Remove the original edge
        if succ:
            self.i_cfg.remove_edge(block.index, succ)
        do_body = block.cdo.body
        do_block = do_body.cblock
        # Calculate new blocks to expand
        new_blocks = self._get_blocks_to_expand(do_block, succ)
        # Stitch the instructions together
        do_edges = self._stitch_together(do_block)
        if do_edges:
            self.i_cfg.add_edges_from(do_edges)
        # The cblock has one ins at least
        block_ins = list(do_block)
        # ``break`` exits the loop: wire it straight to the successor
        if succ:
            for node in block_ins:
                if node.op == cit_break:
                    self.i_cfg.add_edge(node.index, succ)
        first = block_ins[0]
        last = block_ins[-1]
        # Add the edge between the do ins and
        # the first cblock element
        self.i_cfg.add_edge(block.index, first.index)
        # Add the edge to the original successor
        if succ:
            self.i_cfg.add_edge(last.index, succ)
        # Optional: Add a reference to show the looping structure
        self.i_cfg.add_edge(last.index, block.index)
        return new_blocks
    def _expand_while_block(self, block):
        """Expand a given `while` block

        :return: newly discovered nested blocks that still need expansion
        :rtype: list
        """
        succ = self._get_block_successor(block)
        # Remove the original edge
        if succ:
            self.i_cfg.remove_edge(block.index, succ)
        while_body = block.cwhile.body
        while_block = while_body.cblock
        # Calculate new blocks to expand
        new_blocks = self._get_blocks_to_expand(while_block, succ)
        # Stitch the instructions together
        while_edges = self._stitch_together(while_block)
        if while_edges:
            self.i_cfg.add_edges_from(while_edges)
        # The cblock has one ins at least
        block_ins = list(while_block)
        # ``break`` exits the loop: wire it straight to the successor
        if succ:
            for node in block_ins:
                if node.op == cit_break:
                    self.i_cfg.add_edge(node.index, succ)
        first = block_ins[0]
        last = block_ins[-1]
        # Add the edge between the while ins and
        # the first cblock element
        self.i_cfg.add_edge(block.index, first.index)
        # Add the edge to the original successor
        if succ:
            self.i_cfg.add_edge(last.index, succ)
        # Optional: Add a reference to show the looping structure
        self.i_cfg.add_edge(last.index, block.index)
        return new_blocks
    def _expand_for_block(self, block):
        """Expand a given `for` block

        :return: newly discovered nested blocks that still need expansion
        :rtype: list
        """
        succ = self._get_block_successor(block)
        # Remove the original edge
        if succ:
            self.i_cfg.remove_edge(block.index, succ)
        for_body = block.cfor.body
        for_block = for_body.cblock
        # Calculate new blocks to expand
        new_blocks = self._get_blocks_to_expand(for_block, succ)
        # Stitch the instructions together
        for_edges = self._stitch_together(for_block)
        if for_edges:
            self.i_cfg.add_edges_from(for_edges)
        # The cblock has one ins at least
        block_ins = list(for_block)
        # ``break`` exits the loop: wire it straight to the successor
        if succ:
            for node in block_ins:
                if node.op == cit_break:
                    self.i_cfg.add_edge(node.index, succ)
        first = block_ins[0]
        last = block_ins[-1]
        # Add the edge between the for ins and
        # the first cblock element
        self.i_cfg.add_edge(block.index, first.index)
        # Add the edge to the original successor
        if succ:
            self.i_cfg.add_edge(last.index, succ)
        # Optional: Add a reference to show the looping structure
        self.i_cfg.add_edge(last.index, block.index)
        return new_blocks
    def _expand_goto_block(self, block):
        """Expands a given `goto` block

        :return: always an empty list (gotos introduce no nested blocks)
        :rtype: list
        """
        # Remove the current's `goto` successor
        # pointing to the next instruction
        succ = self._get_block_successor(block)
        if succ:
            self.i_cfg.remove_edge(block.index, succ)
        # Target is identified by its label number
        # NOTE: All citem_t has a label_number
        target_label = block.cgoto.label_num
        # Find the citem corresponding to that label
        target_citem = self.cf.find_label(target_label)
        target_idx = target_citem.index
        if target_idx:
            # goto -> target
            self.i_cfg.add_edge(block.index, target_idx)
        # This operation does not add new blocks
        return []
def _generate_i_cfg(self, blocks_to_expand=[]):
"""This expands interesting blocks creating an
increasingly complex graph.
Each one of the methods `_expand_xxx_block`
modify the `self.i_cfg` Digraph
This works recursively until there are no blocks
left to expand
"""
if not blocks_to_expand:
return
for block in blocks_to_expand:
new_blocks = []
# Remove this block from the list
blocks_to_expand.remove(block)
# Modify CFG
if block.op == cit_if:
dprint()
dprint(">> IF block @ {:#08x}...".format(block.ea))
new_blocks = self._expand_if_block(block)
elif block.op == cit_do:
dprint()
dprint(">> DO block @ {:#08x}...".format(block.ea))
new_blocks = self._expand_do_block(block)
elif block.op == cit_while:
dprint()
dprint(">> WHILE block @ {:#08x}...".format(block.ea))
new_blocks = self._expand_while_block(block)
elif block.op == cit_switch:
dprint()
dprint(">> SWITCH block @ {:#08x}...".format(block.ea))
new_blocks = self._expand_switch_block(block)
elif block.op == cit_for:
dprint()
dprint(">> FOR block @ {:#08x}...".format(block.ea))
new_blocks = self._expand_for_block(block)
elif block.op == cit_goto:
dprint()
dprint(">> GOTO block @ {:#08x}...".format(block.ea))
new_blocks = self._expand_goto_block(block)
# Add new found blocks to the list
blocks_to_expand += new_blocks
self._generate_i_cfg(blocks_to_expand=blocks_to_expand)
    def _generate_better_cfg(self):
        """Create a better representation of the interim CFG

        This one has {cinsn_t, cexpr_t} as nodes, instead of indexes.
        It also populates ``index2node`` / ``node2index``.
        """
        _lifted = {}
        self.g = nx.DiGraph()
        # ==================================================
        # Mapping between nodes {cinsn_t, cexpr_t}
        # and their indexes (from the computed i_cfg)
        # ==================================================
        _nodez = {}
        j = 0
        sind = 0  # single node index
        for i in self.i_cfg.nodes():
            obj = self._e(i)
            hi = citem2higher(obj)  # cinsn_t
            if hi.op == cit_expr:
                # Expression statement: the interesting node is the
                # wrapped cexpr_t, re-indexed ("lifted") to its own index
                hi = hi.cexpr
                j = hi.index
                # i -> j
                _lifted[i] = j
                i = j
            else:
                _lifted[i] = i
                sind = i
            _nodez[i] = hi  # cinsn_t or cexpr_t
        # Replicate the interim graph's edges
        # with {cinsn_t, cexpr_t} objects as nodes
        # Update the `i_cfg` with the lifted node indexes
        i_edges = list(self.i_cfg.edges())
        if not i_edges and len(self.i_cfg.nodes()) == 1:
            # Corner case:
            # Only one node and no edges
            self.g.add_node(_nodez[sind])
        else:
            # We have more than one node
            # and edges between them
            self.i_cfg = nx.DiGraph()
            for u, v in i_edges:
                # i -> j
                u = _lifted.get(u, None) or u
                v = _lifted.get(v, None) or v
                self.g.add_edge(_nodez[u], _nodez[v])
                self.i_cfg.add_edge(u, v)
        # Save these mappings for later
        self.index2node = _nodez
        self.node2index = {v: k for k, v in _nodez.items()}
    def _get_all_function_calls(self, with_helpers=True):
        """It does exactly what the name says

        This is needed because calls don't always appear
        in their own nodes (that'd be a `sub_xxx();`) but
        may be 'embedded' in other expressions.
        Ex: `v1 = sub_xxx();` (asg)

        :param with_helpers: also tag IDA helper "calls" (macros like
            __ROR__) found via a second pass
        Returns: None (it sets self.calls)
        """
        for n in self.g.nodes():
            # Which nodes are prone to contain function calls?
            cex = get_expr(n)
            # This catches nodes that are pure calls
            # ex: sub_xxx(1, 2);
            if is_call(cex):
                name = my_get_func_name(cex.x.obj_ea) or 'sub_unknown'
                co = callObj(c=self, name=name, node=n, expr=cex)
                self.calls.append(co)
            # This recurses into more complex expressions
            # looking for calls. A very simple example
            # would be to check both sides of an assignment
            operands = blowup_expression(cex)
            for operand in operands:
                if is_call(operand):
                    # The node in our CFG is the expression
                    # containing the function call
                    name = my_get_func_name(operand.x.obj_ea) or 'sub_unknown'
                    co = callObj(c=self, name=name, node=n, expr=operand)
                    self.calls.append(co)
        if not with_helpers:
            return
        #
        # Second pass to try to locate helper functions
        #
        for co in self.calls:
            if co.call_ea == BADADDR:
                # This may be a helper
                helperz = find_elements_of_type(co.node, cot_helper)
                if not helperz or len(helperz) > 1:
                    continue
                h = helperz.pop()
                co.name = h.helper
                co.is_helper = True
# ================================================================================
# Debugging utilities
# ================================================================================
def dump_i_cfg(self):
"""Dump interim CFG for debugging purposes
"""
print("[DEBUG] Dumping CFG")
for u, v in self.i_cfg.edges():
print("{} -> {}".format(u, v))
print("[DEBUG] Writing GraphML...")
# Labeling the nodes
for node in self.i_cfg.nodes():
self.i_cfg.node[node]['label'] = "{}".format(node)
nx.write_graphml(self.i_cfg, r"D:\graphs\di.graphml")
    def dump_cfg(self, out_dir):
        """Dump the CFG for debugging purposes

        This dumps a representation of the CFG in DOT format.
        To generate an image:
        ``dot.exe -Tpng decompiled.dot -o decompiled.png``

        :param out_dir: directory where ``decompiled.dot`` is written
        :type out_dir: string
        """
        dot = "digraph D {\n"
        dot += "node [shape=record style=rounded fontname=\"Sans serif\" fontsize=\"8\"];\n"
        # Labeling the nodes
        for node in self.g.nodes():
            node_fmt = "node_{} [label=\"{} ({}) ({:X})\"];\n"
            # We can set different node attributes for specific opcodes
            if node.opname == 'call':
                node_fmt = "node_{} [fillcolor=lightblue style=\"rounded, filled\" label=\"{} ({}) ({:X})\"];\n"
            elif node.opname.startswith('asg'):
                node_fmt = "node_{} [fillcolor=green style=\"rounded, filled\" label=\"{} ({}) ({:X})\"];\n"
            elif node.opname == 'if':
                node_fmt = "node_{} [shape=diamond fillcolor=yellow style=filled label=\"{} ({}) ({:X})\"];\n"
            elif node.opname == 'return':
                node_fmt = "node_{} [shape=box fillcolor=red style=filled label=\"{} ({}) ({:X})\"];\n"
            dot += node_fmt.format(
                node.index,
                node.opname,
                node.index,
                node.ea)
        # Adding edges
        for u, v in self.g.edges():
            dot += "node_{} -> node_{};\n".format(
                u.index,
                v.index)
        dot += "}\n"
        print("[DEBUG] Writing DOT file...")
        od = os.path.join(out_dir, "decompiled.dot")
        with open(od, 'wb') as f:
            f.write(bytes(bytearray(dot, "utf-8")))
        print("[DEBUG] Done.")
def get_cfg_for_ea(ea, dot_exe, out_dir):
    """Debugging helper.

    Uses ``DOT`` to create a ``.PNG`` graphic of the
    :class:`ControlFlowinator` CFG and displays it.

    :param ea: address of the function to analyze
    :type ea: int
    :param dot_exe: path to the ``DOT`` binary
    :type dot_exe: string
    :param out_dir: directory to write the ``.DOT`` file
    :type out_dir: string
    """
    try:
        c = controlFlowinator(ea=ea)
    except Exception as e:
        print(e)
        return
    c.dump_cfg(out_dir)
    # First render the PNG with dot, then "run" the PNG file so the
    # OS opens it with the default image viewer
    cmd = "{dot_exe} -Tpng -o '{png_file}' '{dot_file}'".format(
        dot_exe=dot_exe,
        dot_file=os.path.join(out_dir, "decompiled.dot"),
        png_file=os.path.join(out_dir, "decompiled.png"))
    cmd2 = os.path.join(out_dir, "decompiled.png")
    print("Trying to run: {}...".format(cmd))
    os.system(cmd)
    print("Trying to run: {}...".format(cmd2))
    os.system(cmd2)
def debug_blownup_expressions(c=None, node=None):
    """ Debugging helper.

    Show all blown up expressions for this function.

    :param c: a :class:`controlFlowinator` object
    :type c: :class:`controlFlowinator`
    :param node: a single node to blow up instead of the whole function
    """
    if not c and not node:
        print("I need a controlFlowinator object")
        return
    if node:
        elems = blowup_expression(node)
        s_elems = ",".join([expr_ctype[e.op] for e in elems])
        print("{:X} {}".format(node.ea, s_elems))
    else:
        for node in c.g.nodes:
            ea = node.ea
            # Idiom fix: isinstance() instead of an exact type()
            # comparison (also covers cinsn_t subclasses)
            if isinstance(node, cinsn_t):
                # Statements carry the interesting cexpr_t inside
                node = get_cond_from_statement(node)
                if not node:
                    print("{:X} ???".format(ea))
                    continue
            elems = blowup_expression(node)
            s_elems = ",".join([expr_ctype[e.op] for e in elems])
            print("{:X} {}".format(ea, s_elems))
def create_comment(c=None, ea=0, comment=""):
    """Displays a comment at the line corresponding to ``ea``

    :param c: a :class:`controlFlowinator` object
    :type c: :class:`controlFlowinator`
    :param ea: address for the comment
    :type ea: int
    :param comment: the comment to add
    :type comment: string
    :return: returns True if comment successfully created
    :rtype: bool
    :raises ValueError: if neither ``c`` nor a non-zero ``ea`` is given
    """
    if not c:
        # No `controlFlowinator` object supplied
        # We have to decompile manually
        if ea:
            cf = my_decompile(ea=ea)
        else:
            raise ValueError
    else:
        cf = c.cf
    tl = treeloc_t()
    tl.ea = ea
    # for all cases see https://www.hex-rays.com/products/ida/support/idapython_docs/ida_hexrays-module.html
    # Try every possible comment position (itp) until one "sticks"
    # (i.e. does not end up as an orphan comment)
    for itp in [ITP_SEMI, ITP_CURLY1, ITP_CURLY2, ITP_COLON, ITP_BRACE1, ITP_BRACE2, ITP_ASM, ITP_ELSE, ITP_DO, ITP_CASE] + list(range(65)):  # the range covers ITP_ARG1 to ITP_ARG64 and ITP_EMPTY(0)
        tl.itp = itp
        cf.set_user_cmt(tl, comment)
        cf.save_user_cmts()
        cf.__str__()  # trigger string representation, otherwise orphan comments aren't detected
        if not cf.has_orphan_cmts():
            return True
        cf.del_orphan_cmts()
    return False
# ===========================================================
# Processing the CFG
# Convenience functions for cexpr_t elements
# ===========================================================
class callObj:
    """Auxiliary object for code clarity.

    It represents the occurrence of a ``call`` expression.

    :param name: name of the function called
    :type name: string, optional
    :param node: a :class:`controlFlowinator` node containing the call expression
    :type node: :class:`controlFlowinator`
    :param expr: the ``call`` expression element
    :type expr: :class:`cexpr_t`
    """

    def __init__(self, c=None, name="", node=None, expr=None):
        self.c = c
        self.name = name
        # Raw carg_t list as IDA exposes it
        self.ida_args = []
        # Preprocessed arguments: {position: Rep(type, val)}
        self.args = {}
        self.ea = None
        self.call_ea = None
        self.ret_type = None
        self.is_helper = False
        # Node in our CFG containing the call expr
        self.node = node
        # The call expr itself (cot_call)
        self.expr = expr
        if self.expr:
            # This is the Ea of the function
            # being called at location `self.ea`
            self.call_ea = self.expr.x.obj_ea
            # This is the Ea of the `call` instruction
            # and obviously of the decompiled function call
            self.ea = self.expr.ea
            self._populate_args()
            self._populate_return_type()

    def _populate_args(self):
        """Performs some arguments preprocessing"""
        self.ida_args = list(self.expr.a)
        self.args = {}
        # Each argument is classified into a small (type, value) record
        Rep = namedtuple('Rep', 'type val')
        for i, raw_arg in enumerate(self.ida_args):
            # To be sure. Idempotent.
            arg = decast(raw_arg)
            if is_number(arg):
                rep = Rep(type='number', val=num_value(arg))
            elif is_string(arg):
                rep = Rep(type='string', val=string_value(arg))
            elif is_var(arg):
                # :class:`var_ref_t` -> :class:`lvar_t` -> :class:`my_var_t`
                lv = ref2var(arg, c=self.c)
                rep = Rep(type='var', val=my_var_t(lv))
            elif is_global_var(arg):
                rep = Rep(type='global', val=value_of_global(arg))
            elif is_ref(arg):
                # &v1
                rep = Rep(type='ref', val=ref_to(arg))
            elif is_ptr(arg):
                # *v1
                rep = Rep(type='ptr', val=points_to(arg))
            else:
                rep = Rep(type='unk', val=arg)
            self.args[i] = rep

    def _populate_return_type(self):
        """Finds the return type for the function being called
        """
        tif = tinfo_t()
        # Fall back to IDA's guess when no explicit type info exists
        get_tinfo(tif, self.call_ea) or guess_tinfo(tif, self.call_ea)
        self.ret_type = tif.get_rettype()

    def __repr__(self):
        """Display a pretty representation for print"""
        # NOTE(review): this prints directly and returns "" rather than
        # building the repr string -- unconventional, kept as-is
        print("--------------------------------------")
        print("Ea: {:X}".format(self.ea))
        print("Target's Name: {}".format(self.name))
        print("Target's Ea: {:X}".format(self.call_ea))
        print("Target's ret: {}".format(self.ret_type))
        print("Is helper: {}".format(self.is_helper))
        print("Args:")
        for i, arg in self.args.items():
            print(" - {}: {}".format(i, arg))
        return ""
def get_expr(n):
    """Returns the corresponding :class:`cexpr_t` in case `n` is
    of type :class:`cinsn_t`. Idempotent otherwise.
    """
    # Statements wrap their condition/value expression in a
    # per-opcode sub-object; unwrap the known ones
    if n.op == cit_if:
        return n.cif.expr
    if n.op == cit_return:
        return n.creturn.expr
    if n.op == cit_for:
        return n.cfor.expr
    if n.op == cit_while:
        return n.cwhile.expr
    if n.op == cit_do:
        return n.cdo.expr
    return n
def blowup_expression(cex, final_operands=None):
    """Extracts all elements of an expression
    Ex: ``x + 1 < y`` -> ``{x, 1, y}``
    :param cex: a :class:`cexpr_t` object
    :type cex: :class:`cexpr_t`
    :param final_operands: accumulator set used during recursion; leave
        as None on the first (external) call
    :type final_operands: set, optional
    :return: a *set* of elements (the *final_operands*)
    :rtype: set
    """
    # Recursion and default values in Python...
    if final_operands is None:
        # First call, before recursion
        final_operands = set([])
    operands = {}
    # Quick and dirty operand extraction
    # ----------------------------------
    # NOTE: for now this does not look inside member offsets ('m')
    op_names = ('x', 'y', 'z')
    for name in op_names:
        if hasattr(cex, name):
            attr = getattr(cex, name)
            if attr:
                operands[name] = attr
    # Special code to handle call arguments
    if hasattr(cex, 'a') and cex.a:
        for arg in cex.a:
            arg_name = "a{}".format(arg.index)
            operands[arg_name] = arg
    # Corner case: this expression is a variable
    # (maps a synthetic name to cex itself, so the loop below adds cex)
    if hasattr(cex, 'v') and cex.v:
        v_name = "v{}".format(cex.v.idx)
        operands[v_name] = cex
    for op_name, operand in operands.items():
        # NOTE(review): this tests `cex`, not `operand`, so a final cex
        # is re-added once per operand (idempotent on a set, but looks
        # like it may have been intended to test `operand`) -- confirm.
        if is_final_expr(cex):
            # If the expression itself is a final one
            final_operands.add(cex)
        elif not is_final_expr(operand):
            # if visited_operands != None:
            #     visited_operands.append(operand)
            # Calls are kept as elements AND recursed into for their args
            if is_call(operand):
                final_operands.add(operand)
            blowup_expression(operand, final_operands)
        else:
            final_operands.add(operand)
            dprint("> final: {} {:#08x}".format(
                expr_ctype[operand.op], operand.ea))
    return final_operands
def get_all_vars_in_node(cex):
    """Extracts all variables involved in an expression.
    :param cex: typically a :class:`controlFlowinator` node
    :type cex: :class:`cexpr_t`
    :return: list of ``var_t`` indexes (to ``cf.lvars``)
    :rtype: list
    """
    # Decompose the expression, then keep only the variable elements
    return [elem.v.idx for elem in blowup_expression(cex) if is_var(elem)]
def find_all_calls_to_within(f_name, ea=0, c=None):
    """Finds all calls to a function with the given name \
    within the function containing the ``ea`` address.
    Note that the string comparison is relaxed to find variants of it, that is,
    searching for ``malloc`` will match as well ``_malloc``, ``malloc_0``, etc.
    :param f_name: the function name to search for
    :type f_name: string
    :param ea: any address within the function that may contain the calls
    :type ea: int
    :param c: if specified, work on this ``controlFlowinator`` object
    :type c: :class:`controlFlowinator`, optional
    :return: a list of :class:`callObj`
    :rtype: list
    """
    if c is None:
        # Build the control flow information on demand
        try:
            c = controlFlowinator(ea=ea, fast=False)
        except Exception as exc:
            print("Failed to find_all_calls_to_within {}".format(f_name))
            print(exc)
            return []
    needle = f_name.lower()
    # Relaxed, case-insensitive substring match
    return [co for co in c.calls if needle in co.name.lower()]
def find_all_calls_to(f_name):
    """Finds all calls to a function with the given name
    Note that the string comparison is relaxed to find variants of it, that is,
    searching for ``malloc`` will match as well ``_malloc``, ``malloc_0``, etc.
    :param f_name: the function name to search for
    :type f_name: string
    :return: a list of :class:`callObj`
    :rtype: list
    """
    target_ea = get_name_ea_simple(f_name)
    if target_ea == BADADDR:
        print("Failed to resolve address for {}".format(f_name))
        return []
    # Collect the set of unique *function* callers first
    caller_eas = set()
    for xref in XrefsTo(target_ea, True):
        if not xref.iscode:
            continue
        caller = get_func(xref.frm)
        if caller is not None:
            caller_eas.add(caller.start_ea)
    # Then search each caller for the actual call expressions
    results = []
    for caller_ea in caller_eas:
        results.extend(find_all_calls_to_within(f_name, caller_ea))
    return results
def find_elements_of_type(cex, element_type, elements=None):
    """Recursively extracts expression elements until \
    a :class:`cexpr_t` from a specific group is found
    :param cex: a :class:`cexpr_t` object
    :type cex: :class:`cexpr_t`
    :param element_type: the type of element we are looking for \
    (as a ``cot_xxx`` value, see ``compiler_consts.py``)
    :type element_type: a ``cot_xxx`` value (eg. ``cot_add``)
    :param elements: accumulator set used during recursion; leave as
        None on the first (external) call
    :type elements: set, optional
    :return: a set of :class:`cexpr_t` of the specified type
    :rtype: set
    """
    if elements is None:
        elements = set([])
    operands = {}
    # ===========================================
    # This covers the case cex is itself of
    # the type currently searched for :)
    # ===========================================
    if cex.op == element_type:
        elements.add(cex)
    # Quick and dirty operand extraction
    # ----------------------------------
    # TODO: for now this does not look inside member offsets ('m')
    op_names = ('x', 'y', 'z')
    for name in op_names:
        if hasattr(cex, name):
            attr = getattr(cex, name)
            if attr:
                operands[name] = attr
    # Special code to handle call arguments
    if hasattr(cex, 'a') and cex.a:
        for arg in cex.a:
            arg_name = "a{}".format(arg.index)
            operands[arg_name] = arg
    # Corner case: this expression is a variable
    # (maps a synthetic name to cex itself)
    if hasattr(cex, 'v') and cex.v:
        v_name = "v{}".format(cex.v.idx)
        operands[v_name] = cex
    for op_name, operand in operands.items():
        if operand.op == element_type:
            # If the expression itself is a sought one
            elements.add(operand)
        elif not is_final_expr(operand):
            # Not final and not a match: keep decomposing
            find_elements_of_type(operand, element_type, elements)
    return elements
def is_final_expr(cex):
    """Helper for internal functions.
    A final expression will be defined as one that \
    can not be further decomposed, eg. number, var, string, etc.
    Normally, you should not need to use this.
    :param cex: a :class:`cexpr_t` object
    :type cex: :class:`cexpr_t`
    :return: True or False
    :rtype: bool
    """
    # Idiom fix: return the membership test directly instead of an
    # if/else that returns True/False.
    return cex.op in expr_final
def get_cond_from_statement(ins):
    """Given a ``cinsn_t`` representing a control flow structure \
    (do, while, for, etc.), it returns the corresponding ``cexpr_t`` \
    representing the condition/argument for that code construct.
    This is useful since we usually want to peek into \
    conditional statements...
    :param ins: the :class:`cinsn_t` associated with a control flow structure
    :type ins: :class:`cinsn_t`
    :return: the condition or argument within that control flow structure
    :rtype: :class:`cexpr_t`
    """
    # Dispatch table instead of an if/elif chain; unknown statement
    # kinds yield None, exactly like the original fall-through branch.
    accessors = {
        cit_while: lambda i: i.cwhile.expr,
        cit_do: lambda i: i.cdo.expr,
        cit_for: lambda i: i.cfor.expr,
        cit_if: lambda i: i.cif.expr,
        cit_return: lambda i: i.creturn.expr,
        cit_switch: lambda i: i.cswitch.expr,
    }
    getter = accessors.get(ins.op)
    return getter(ins) if getter is not None else None
def assigns_to_var(cex):
    """Does this :class:``cexpr_t`` assign a value to any variable?
    TODO: this is limited for now to expressions of the type:
    ``v1 = something something``
    :param cex: a :class:`cexpr_t` object
    :type cex: :class:`cexpr_t`
    :return: the assigned var index (to ``cf.lvars`` array) or -1 if the \
    :class:`cexpr_t` does not assign to any variable
    :rtype: int
    """
    if not is_asg(cex):
        # Only plain assignments are analyzed for now
        return -1
    lhs = cex.x
    target = None
    if is_var(lhs):
        # v1 = ...
        target = lhs
    elif is_ptr(lhs):
        # *v1 = ...  -> unwrap the dereference and check again
        inner = lhs.x
        if is_var(inner):
            target = inner
    return target.v.idx if target else -1
def does_constrain(node):
    """This tries to answer the question: "Does this ``node`` constrain variables in any way?"
    Essentially it is looking for the occurrence of variables within known \
    *constrainer constructs*, eg. inside an ``if`` condition.
    TODO: many more heuristics can be included here
    :param node: typically a :class:`controlFlowinator` node
    :type node: :class:`cinsn_t` or :class:`cexpr_t`
    :return: a set of variable indexes (to ``cf.lvars`` array)
    :rtype: set
    """
    constrained_var_idxs = set([])
    if is_asg(node):
        lhs = node.x
        rhs = node.y
        # =================================
        # Binary truncation
        # ex: v1 = v4 & 0xFFFF
        # =================================
        if is_binary_truncation(rhs):
            var_indexes = get_all_vars_in_node(lhs)
            # TODO: Refine this algorithm
            # NOTE(review): only the first variable on the lhs is
            # reported; multi-var lhs expressions lose information.
            v_idx = var_indexes[0]
            return set([v_idx])
        # ===========================
        # Something simple as v1 = 0
        # ===========================
        if is_var(lhs) and is_number(rhs):
            v_idx = lhs.v.idx
            constrained_var_idxs.add(v_idx)
            return constrained_var_idxs
    # ===================================
    # Statements containing a condition
    # ex: if(v < MAX),
    #     for(i=0; i<10; i++), etc.
    # ===================================
    insn_cond = insn_conditions.keys()
    if node.op in insn_cond:
        # Unwrap real condition (ex. x < y)
        cond = get_cond_from_statement(node)
        if not cond:
            return constrained_var_idxs
        constrainers = expr_condition.keys()
        if cond.op in constrainers:
            # We are mostly interested in the left
            # hand side (x) of the expression
            lhs = cond.x
            lhs_vars = find_elements_of_type(lhs, cot_var)
            for e in lhs_vars:
                v_idx = e.v.idx
                constrained_var_idxs.add(v_idx)
            dprint("Constraint found @ {:#08x}: {}".format(
                node.ea, expr_ctype[cond.op]))
        return constrained_var_idxs
    # TODO: More constraining cases here
    return set([])
def get_interesting_calls(c, user_defined=None):
    """Not all functions are created equal.
    We are interested in functions with certain names or substrings in it.
    :param c: a :class:`controlFlowinator` object
    :type c: :class:`controlFlowinator`
    :param user_defined: a list of names (or substrings), if not supplied a \
    hard-coded default list will be used.
    :type user_defined: list, optional
    :return: a list of :class:`callObj` (each call appears at most once)
    :rtype: list
    """
    # BUGFIX: the default used to be a mutable literal ([]).
    # None avoids the shared-mutable-default pitfall; falsy values
    # still select the hard-coded list, so callers are unaffected.
    default_list = ['check', 'log', 'assert', 'cpy',
                    'copy', 'alloc', 'move', 'memset']
    interesting_calls = user_defined or default_list
    result = []
    for co in c.calls:
        name = co.name.lower()
        # BUGFIX: any() appends each call at most once, even when its
        # name matches several of the interesting substrings (the old
        # inner loop appended one duplicate per match).
        if any(substr in name for substr in interesting_calls):
            result.append(co)
    return result
def is_write(node):
    """Try to find write primitives.
    Looking for things like::
        *(_DWORD *)(something) = v38
        arr[i] = v21
    TODO: Rather rough, it is a first version...
    :param node: a :class:`controlFlowinator` node
    :type node: :class:`cinsn_t` or :class:`cexpr_t`
    :return: True or False
    :rtype: bool
    """
    if not is_asg(node):
        return False
    dest = node.x
    # Either a dereference (*something) or array indexing (arr[i])
    # on the left-hand side counts as a memory write.
    return bool(is_ptr(dest) or is_array_indexing(dest))
def is_read(ins):
    """Try to find read primitives.
    Looking for things like::
        v3 = *(_DWORD *)(v5 + 784)
    NOTE: this will find expressions that are read && write,
    since they are not mutually exclusive
    TODO: Rather rough, it is a first version...
    :param ins: a :class:`controlFlowinator` node
    :type ins: :class:`cinsn_t` or :class:`cexpr_t`
    :return: True or False
    :rtype: bool
    """
    if not is_asg(ins):
        return False
    # A dereference on the right-hand side means a memory read
    return bool(is_ptr(ins.y))
# ===========================================================
def main():
    """Entry point: check for the Hex-Rays decompiler and print the
    FIDL banner and version information."""
    if not init_hexrays_plugin():
        print("No decompiler found! :(")
        return
    # Ascii banner :)
    print(r"""
    ███████╗██╗██████╗ ██╗
    ██╔════╝██║██╔══██╗██║
    █████╗ ██║██║ ██║██║
    ██╔══╝ ██║██║ ██║██║
    ██║ ██║██████╔╝███████╗
    ╚═╝ ╚═╝╚═════╝ ╚══════╝
    """)
    print("Hex-rays version {} detected".format(get_hexrays_version()))
    print("FIDL v.{} (\"{}\") loaded...".format(__version__, __codename__))
    print("")
# Run only when executed as a script (inside IDA), not on import.
if __name__ == '__main__':
    main()
| 28.640044
| 196
| 0.575225
|
63a01777f9add1543c801b6fb8a7f8397133783a
| 2,060
|
py
|
Python
|
tests/test_security/test_security_api_key_query_description.py
|
dantownsend/xpresso
|
a4c4dbe96972a6f0339f30d7d794932f70510eea
|
[
"MIT"
] | null | null | null |
tests/test_security/test_security_api_key_query_description.py
|
dantownsend/xpresso
|
a4c4dbe96972a6f0339f30d7d794932f70510eea
|
[
"MIT"
] | null | null | null |
tests/test_security/test_security_api_key_query_description.py
|
dantownsend/xpresso
|
a4c4dbe96972a6f0339f30d7d794932f70510eea
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, Generator
import pytest
from pydantic import BaseModel
from xpresso import App, Dependant, Path, Security
from xpresso.security import APIKeyQuery
from xpresso.testclient import TestClient
from xpresso.typing import Annotated
# Shared security scheme: reads the API key from the "key" query parameter.
api_key = APIKeyQuery(name="key", description="API Key Query")
class User(BaseModel):
    """Minimal user model; the username is the raw API key value."""
    username: str
def get_current_user(oauth_header: Annotated[str, Security(api_key)]):
    """Build a User whose username is the API key from the query string."""
    return User(username=oauth_header)
def read_current_user(current_user: Annotated[User, Dependant(get_current_user)]):
    """Endpoint handler: echo the injected current user."""
    return current_user
# Single-route app exposing the authenticated "who am I" endpoint.
app = App([Path("/users/me", get=read_current_user)])
@pytest.fixture
def client() -> Generator[TestClient, None, None]:
    """Yield a TestClient with the app's lifespan running."""
    with TestClient(app) as client:
        yield client
# Expected OpenAPI document for the app above; pins the generated
# schema, including the APIKeyQuery security scheme and its description.
openapi_schema: Dict[str, Any] = {
    "openapi": "3.0.3",
    "info": {"title": "API", "version": "0.1.0"},
    "paths": {
        "/users/me": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                    }
                },
                "security": [{"APIKeyQuery": []}],
            }
        }
    },
    "components": {
        "securitySchemes": {
            "APIKeyQuery": {
                "type": "apiKey",
                "name": "key",
                "in": "query",
                "description": "API Key Query",
            }
        }
    },
}
def test_openapi_schema(client: TestClient):
    """The generated OpenAPI document matches the expected schema."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
def test_security_api_key(client: TestClient):
    """A request carrying the API key in the query string authenticates."""
    resp = client.get("/users/me?key=secret")
    assert resp.status_code == 200, resp.text
    assert resp.json() == {"username": "secret"}
def test_security_api_key_no_key(client: TestClient):
    """Requests without the API key are rejected with 401."""
    resp = client.get("/users/me")
    assert resp.status_code == 401, resp.text
    assert resp.json() == {"detail": "Not authenticated"}
| 25.75
| 82
| 0.604854
|
56e550512d7e699360010d4b0986b534798a3b8a
| 3,085
|
py
|
Python
|
resources/lib/MSLHttpRequestHandler.py
|
hcordobes/plugin.video.netflix
|
0bf52c68b0202d61c799480f2c9d047f55c433e0
|
[
"MIT"
] | 17
|
2018-08-09T20:54:53.000Z
|
2019-11-23T03:17:55.000Z
|
resources/lib/MSLHttpRequestHandler.py
|
hcordobes/plugin.video.netflix
|
0bf52c68b0202d61c799480f2c9d047f55c433e0
|
[
"MIT"
] | 1
|
2019-01-07T19:44:07.000Z
|
2019-02-09T14:12:16.000Z
|
resources/lib/MSLHttpRequestHandler.py
|
hcordobes/plugin.video.netflix
|
0bf52c68b0202d61c799480f2c9d047f55c433e0
|
[
"MIT"
] | 14
|
2018-11-17T05:14:22.000Z
|
2019-06-03T12:22:11.000Z
|
# -*- coding: utf-8 -*-
# Author: trummerjo
# Module: MSLHttpRequestHandler
# Created on: 26.01.2017
# License: MIT https://goo.gl/5bMj3H
"""Handles & translates requests from Inputstream to Netflix"""
import base64
import BaseHTTPServer
from urlparse import urlparse, parse_qs
from SocketServer import TCPServer
from resources.lib.MSL import MSL
class MSLHttpRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handles & translates requests from Inputstream to Netflix"""

    # pylint: disable=invalid-name
    def do_HEAD(self):
        """Answers head requests with a success code"""
        self.send_response(200)

    # pylint: disable=invalid-name
    def do_POST(self):
        """Loads the licence for the requested resource

        Expects a body of the form ``<challenge>!<base64 session id>``.
        """
        length = int(self.headers.get('content-length'))
        post = self.rfile.read(length)
        data = post.split('!')
        # BUGFIX: compare with '==' / '!=' instead of 'is' / 'is not'.
        # Identity comparison against int/str literals only happens to
        # work because of CPython interning and is not guaranteed.
        if len(data) == 2:
            challenge = data[0]
            sid = base64.standard_b64decode(data[1])
            b64license = self.server.msl_handler.get_license(challenge, sid)
            if b64license != '':
                self.send_response(200)
                self.end_headers()
                self.wfile.write(base64.standard_b64decode(b64license))
                self.finish()
            else:
                self.server.nx_common.log(msg='Error getting License')
                self.send_response(400)
        else:
            self.server.nx_common.log(msg='Error in License Request')
            self.send_response(400)

    # pylint: disable=invalid-name
    def do_GET(self):
        """Loads the XML manifest for the requested resource"""
        url = urlparse(self.path)
        params = parse_qs(url.query)
        if 'id' not in params:
            self.send_response(400, 'No id')
        else:
            # Get the manifest with the given id
            # NOTE(review): on the negative branch these evaluate to the
            # *string* 'false' (which is truthy), not the boolean False.
            # Looks unintended, but load_manifest may depend on it --
            # confirm before changing.
            dolby = (True if 'dolby' in params and
                     params['dolby'][0].lower() == 'true' else 'false')
            hevc = (True if 'hevc' in params and
                    params['hevc'][0].lower() == 'true' else 'false')
            data = self.server.msl_handler.load_manifest(
                int(params['id'][0]),
                dolby, hevc)
            self.send_response(200)
            self.send_header('Content-type', 'application/xml')
            self.end_headers()
            self.wfile.write(data)

    def log_message(self, *args):
        """Disable the BaseHTTPServer Log"""
        pass
##################################
class MSLTCPServer(TCPServer):
    """Override TCPServer to allow usage of shared members"""

    def __init__(self, server_address, nx_common):
        """Initialization of MSLTCPServer

        :param server_address: (host, port) tuple to bind to
        :param nx_common: shared add-on helper (provides logging)
        """
        nx_common.log(msg='Constructing MSLTCPServer')
        # Stored on the server so request handlers can reach them
        # through self.server.* (see MSLHttpRequestHandler).
        self.nx_common = nx_common
        self.msl_handler = MSL(nx_common)
        TCPServer.__init__(self, server_address, MSLHttpRequestHandler)

    def reset_msl_data(self):
        """Resets MSL data by performing a new key handshake."""
        self.msl_handler.perform_key_handshake()
| 33.901099
| 79
| 0.610049
|
7daafc6e210a6ac5e88736d87fd75a925f311747
| 3,686
|
py
|
Python
|
libraryBackend/Account/migrations/0001_initial.py
|
junaidgirkar/departmental-library-management-app
|
f14320214ff5903c296ee785a329335183b35402
|
[
"MIT"
] | 1
|
2021-11-17T20:13:01.000Z
|
2021-11-17T20:13:01.000Z
|
libraryBackend/Account/migrations/0001_initial.py
|
junaidgirkar/departmental-library-management-app
|
f14320214ff5903c296ee785a329335183b35402
|
[
"MIT"
] | null | null | null |
libraryBackend/Account/migrations/0001_initial.py
|
junaidgirkar/departmental-library-management-app
|
f14320214ff5903c296ee785a329335183b35402
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-09-22 13:26
import Account.managers
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``User`` model.

    NOTE: do not edit an already-applied migration; schema changes
    belong in a new migration.
    """
    initial = True
    dependencies = [
        ("auth", "0012_alter_user_first_name_max_length"),
    ]
    operations = [
        migrations.CreateModel(
            name="User",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("password", models.CharField(max_length=128, verbose_name="password")),
                (
                    "last_login",
                    models.DateTimeField(
                        blank=True, null=True, verbose_name="last login"
                    ),
                ),
                (
                    "email",
                    models.EmailField(
                        max_length=254, unique=True, verbose_name="email address"
                    ),
                ),
                # NOTE(review): verbose_name "first_name" (underscore) is
                # inconsistent with "last name" below; changing it would
                # require a new migration.
                (
                    "first_name",
                    models.CharField(max_length=40, verbose_name="first_name"),
                ),
                (
                    "last_name",
                    models.CharField(max_length=40, verbose_name="last name"),
                ),
                (
                    "date_joined",
                    models.DateTimeField(auto_now_add=True, verbose_name="date joined"),
                ),
                ("is_active", models.BooleanField(default=True, verbose_name="active")),
                (
                    "is_staff",
                    models.BooleanField(default=False, verbose_name="staff status"),
                ),
                (
                    "is_superuser",
                    models.BooleanField(default=False, verbose_name="is superuser"),
                ),
                (
                    "is_admin",
                    models.BooleanField(default=False, verbose_name="is admin"),
                ),
                (
                    "is_student",
                    models.BooleanField(default=False, verbose_name="is student"),
                ),
                (
                    "is_teacher",
                    models.BooleanField(default=False, verbose_name="is teacher"),
                ),
                (
                    "groups",
                    models.ManyToManyField(
                        blank=True,
                        help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
                        related_name="user_set",
                        related_query_name="user",
                        to="auth.Group",
                        verbose_name="groups",
                    ),
                ),
                (
                    "user_permissions",
                    models.ManyToManyField(
                        blank=True,
                        help_text="Specific permissions for this user.",
                        related_name="user_set",
                        related_query_name="user",
                        to="auth.Permission",
                        verbose_name="user permissions",
                    ),
                ),
            ],
            options={
                "verbose_name": "user",
                "verbose_name_plural": "users",
            },
            managers=[
                ("objects", Account.managers.UserManager()),
            ],
        ),
    ]
| 34.773585
| 134
| 0.405317
|
c675a9c0ff5ae0fef3967630b1f90ee58bbc7ac1
| 1,771
|
py
|
Python
|
invoice/views.py
|
AhmedElmawary/erp
|
c998787c62194e26e10e3cbc61e35935e901e56d
|
[
"MIT"
] | null | null | null |
invoice/views.py
|
AhmedElmawary/erp
|
c998787c62194e26e10e3cbc61e35935e901e56d
|
[
"MIT"
] | null | null | null |
invoice/views.py
|
AhmedElmawary/erp
|
c998787c62194e26e10e3cbc61e35935e901e56d
|
[
"MIT"
] | null | null | null |
# import rest_framework
# from django.shortcuts import get_list_or_404, get_object_or_404
# from rest_framework.views import APIView
# from rest_framework.authentication import TokenAuthentication
# from rest_framework.permissions import (IsAuthenticated, IsAdminUser)
# from rest_framework.status import (HTTP_200_OK, HTTP_201_CREATED)
# from rest_framework.response import Response
# from .serializers import (
# InvoiceCreateSerializer,
# InvoiceGetSerializer,
# InvoiceListSerializer,
# )
# from typing import (Dict, List, Union)
# # from .models import SupplierInvoice
# class MainAuth:
# pass
# # authentication_classes = (TokenAuthentication, )
# # permission_classes = (IsAuthenticated,)
# class InitInvoice(MainAuth, APIView):
# queryset = SupplierInvoice.objects.filter()
# # Trying to start code as strict code
# class InvoiceCreate(InitInvoice):
# serializer_class: rest_framework.serializers.Serializer = InvoiceCreateSerializer
# def post(self, request, format=None):
# serialized = self.serializer_class(data=request.data)
# serialized.is_valid(raise_exception=True)
# serialized.save()
# return Response(serialized.data, HTTP_201_CREATED)
# class InvoiceGet(InitInvoice):
# serializer_class = InvoiceGetSerializer
# def get(self, request, pk):
# serilaized = self.serializer_class(get_object_or_404(self.queryset, pk= pk))
# return Response(serilaized.data, HTTP_200_OK)
# class InvoiceList(InitInvoice):
# serializer_class = InvoiceListSerializer
# def get(self, request):
# serialized = self.serializer_class(get_list_or_404(self.queryset), many=True)
# return Response(serialized.data, HTTP_200_OK)
| 33.415094
| 87
| 0.733484
|
edb1312d87ddd307efc63101c913e0890979fa06
| 673
|
py
|
Python
|
tests/conftest.py
|
SurajDonthi/Aspect-Based-Sentiment-Analysis
|
6f20357218b495f7ffa2d6fe9dfcb43b42a5c4f1
|
[
"Apache-2.0"
] | 351
|
2020-04-20T13:08:25.000Z
|
2022-03-28T11:02:13.000Z
|
tests/conftest.py
|
SurajDonthi/Aspect-Based-Sentiment-Analysis
|
6f20357218b495f7ffa2d6fe9dfcb43b42a5c4f1
|
[
"Apache-2.0"
] | 58
|
2020-04-23T05:58:14.000Z
|
2022-03-31T14:09:07.000Z
|
tests/conftest.py
|
SurajDonthi/Aspect-Based-Sentiment-Analysis
|
6f20357218b495f7ffa2d6fe9dfcb43b42a5c4f1
|
[
"Apache-2.0"
] | 72
|
2020-04-22T05:39:19.000Z
|
2022-03-23T20:49:56.000Z
|
import pytest
def pytest_addoption(parser):
    """Register the ``--run-slow`` flag with pytest's option parser."""
    parser.addoption(
        "--run-slow",
        action="store_true",
        default=False,
        help="Run slow tests",
    )
def pytest_configure(config):
    """Declare the custom ``slow`` marker so pytest does not warn."""
    config.addinivalue_line(
        "markers", "slow: mark test, which can be slow to run")
def pytest_collection_modifyitems(config, items):
    """Mark ``slow`` tests as skipped unless ``--run-slow`` was given."""
    if config.getoption("--run-slow"):
        # --run-slow given in cli: do not skip slow checks
        return
    skip_marker = pytest.mark.skip(reason="need --run-slow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_marker)
| 29.26087
| 60
| 0.667162
|
353b1dd4c84736cde9f4cdd80cd06b2cdd7f50e1
| 17
|
py
|
Python
|
dcssearch/__init__.py
|
CharlieNPie/dcsdata
|
c1935e2cd969494364bd4949d08c558cbec7befd
|
[
"MIT"
] | null | null | null |
dcssearch/__init__.py
|
CharlieNPie/dcsdata
|
c1935e2cd969494364bd4949d08c558cbec7befd
|
[
"MIT"
] | null | null | null |
dcssearch/__init__.py
|
CharlieNPie/dcsdata
|
c1935e2cd969494364bd4949d08c558cbec7befd
|
[
"MIT"
] | null | null | null |
# Package name exported by this __init__ module.
name = "dcsdata"
| 8.5
| 16
| 0.647059
|
4342546beac7efc0ef2c665f76c35b72b5de18e3
| 355
|
py
|
Python
|
src/pyoffice/outlook/windows/dasl/operator/LEOperator.py
|
qq809326636/pyoffice
|
a3c036ef82f6b0438c1e38a7675eb1f06c61144d
|
[
"MIT"
] | 7
|
2020-06-19T03:11:48.000Z
|
2020-11-18T06:14:21.000Z
|
src/pyoffice/outlook/windows/dasl/operator/LEOperator.py
|
qq809326636/pyoffice
|
a3c036ef82f6b0438c1e38a7675eb1f06c61144d
|
[
"MIT"
] | null | null | null |
src/pyoffice/outlook/windows/dasl/operator/LEOperator.py
|
qq809326636/pyoffice
|
a3c036ef82f6b0438c1e38a7675eb1f06c61144d
|
[
"MIT"
] | null | null | null |
from .BaseOperator import BaseOperator
__all__ = ['LEOperator']
class LEOperator(BaseOperator):
    """DASL comparison operator for "less than or equal" ('<=')."""

    def __init__(self,
                 *args,
                 **kwargs):
        """Forward construction to BaseOperator, then set this
        operator's numeric code and symbol."""
        BaseOperator.__init__(self,
                              *args,
                              **kwargs)
        # NOTE(review): 21 appears to be the DASL code for '<=';
        # confirm against the sibling operator classes.
        self.code = 21
        self.op = '<='
| 18.684211
| 40
| 0.430986
|
c389d7ffb3ea217cf4b456fbcc2b13418cf6a676
| 34,228
|
py
|
Python
|
piicrgmms/sklearn_mixture_piicr/_strict_bayesian_mixture.py
|
colinweber27/piicrgmms
|
d74fb00c584b16f341c47a42a930e27efe4e360d
|
[
"MIT"
] | null | null | null |
piicrgmms/sklearn_mixture_piicr/_strict_bayesian_mixture.py
|
colinweber27/piicrgmms
|
d74fb00c584b16f341c47a42a930e27efe4e360d
|
[
"MIT"
] | null | null | null |
piicrgmms/sklearn_mixture_piicr/_strict_bayesian_mixture.py
|
colinweber27/piicrgmms
|
d74fb00c584b16f341c47a42a930e27efe4e360d
|
[
"MIT"
] | null | null | null |
"""Bayesian Gaussian Mixture Model."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# Contributor: Colin Weber <colin.weber.27@gmail.com>
# License: BSD 3 clause
import math
import numpy as np
from scipy.special import betaln, digamma, gammaln
from ._base import StrictBayesianBaseModel, _check_shape
from ._gaussian_mixture import _check_precision_matrix
from ._gaussian_mixture import _check_precision_positivity
from ._gaussian_mixture import _compute_log_det_cholesky
from ._gaussian_mixture import _compute_precision_cholesky
from ._gaussian_mixture import _estimate_gaussian_parameters
from ._gaussian_mixture import _estimate_log_gaussian_prob
from sklearn.utils import check_array
from sklearn.utils.validation import _deprecate_positional_args
def _log_dirichlet_norm(dirichlet_concentration):
    """Compute the log of the Dirichlet distribution normalization term.
    Parameters
    ----------
    dirichlet_concentration : array-like, shape (n_samples,)
        The parameters values of the Dirichlet distribution.
    Returns
    -------
    log_dirichlet_norm : float
        The log normalization of the Dirichlet distribution.
    """
    # log B(a)^-1 = log Gamma(sum_k a_k) - sum_k log Gamma(a_k)
    total_concentration = np.sum(dirichlet_concentration)
    per_component = np.sum(gammaln(dirichlet_concentration))
    return gammaln(total_concentration) - per_component
def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features):
    """Compute the log of the Wishart distribution normalization term.
    Parameters
    ----------
    degrees_of_freedom : array-like, shape (n_components,)
        The number of degrees of freedom on the covariance Wishart
        distributions.
    log_det_precisions_chol : array-like, shape (n_components,)
        The determinant of the precision matrix for each component.
    n_features : int
        The number of features.
    Return
    ------
    log_wishart_norm : array-like, shape (n_components,)
        The log normalization of the Wishart distribution.
    """
    # To simplify the computation we have removed the np.log(np.pi) term
    det_term = degrees_of_freedom * log_det_precisions_chol
    power_of_two_term = degrees_of_freedom * n_features * .5 * math.log(2.)
    gamma_arguments = .5 * (degrees_of_freedom -
                            np.arange(n_features)[:, np.newaxis])
    multigamma_term = np.sum(gammaln(gamma_arguments), 0)
    return -(det_term + power_of_two_term + multigamma_term)
class StrictBayesianGaussianMixture(StrictBayesianBaseModel):
"""Strict Variational Bayesian estimation of a Gaussian mixture.
This class allows to infer an approximate posterior distribution over the
parameters of a Gaussian mixture distribution. The number of components
is given by the value of n_components, and remains fixed throughout the fit.
In order to accomplish this, the fit stops when the smallest cluster contains 0.1%
of the samples in the data set. Therefore, it rarely converges.
This class implements two types of prior for the weights distribution: a
finite mixture model with Dirichlet distribution and an infinite mixture
model with the Dirichlet Process. In practice Dirichlet Process inference
algorithm is approximated and uses a truncated distribution with a fixed
maximum number of components (called the Stick-breaking representation).
The number of components actually used almost always depends on the data.
    .. versionadded:: 0.1
Read more in the :ref:`User Guide <bgmm>`.
Parameters
----------
n_components : int, defaults to 1.
The number of mixture components.
covariance_type : {'full', 'tied', 'diag', 'spherical'}, defaults to 'full'
String describing the type of covariance parameters to use.
Must be one of::
'full' (each component has its own general covariance matrix),
'tied' (all components share the same general covariance matrix),
'diag' (each component has its own diagonal covariance matrix),
'spherical' (each component has its own single variance).
tol : float, defaults to 1e-3.
The convergence threshold. EM iterations will stop when the
lower bound average gain on the likelihood (of the training data with
respect to the model) is below this threshold.
reg_covar : float, defaults to 1e-6.
Non-negative regularization added to the diagonal of covariance.
Allows to assure that the covariance matrices are all positive.
max_iter : int, defaults to 100.
The number of EM iterations to perform.
n_init : int, defaults to 1.
The number of initializations to perform. The result with the highest
lower bound value on the likelihood is kept.
init_params : {'kmeans', 'random'}, defaults to 'kmeans'.
The method used to initialize the weights, the means and the
covariances.
Must be one of::
'kmeans' : responsibilities are initialized using kmeans.
'random' : responsibilities are initialized randomly.
weight_concentration_prior_type : str, defaults to 'dirichlet_process'.
String describing the type of the weight concentration prior.
Must be one of::
'dirichlet_process' (using the Stick-breaking representation),
'dirichlet_distribution' (can favor more uniform weights).
weight_concentration_prior : float | None, optional.
The dirichlet concentration of each component on the weight
distribution (Dirichlet). This is commonly called gamma in the
literature. The higher concentration puts more mass in
the center and will lead to more components being active, while a lower
concentration parameter will lead to more mass at the edge of the
mixture weights simplex. The value of the parameter must be greater
than 0. If it is None, it's set to ``1. / n_components``.
mean_precision_prior : float | None, optional.
The precision prior on the mean distribution (Gaussian).
Controls the extent of where means can be placed. Larger
values concentrate the cluster means around `mean_prior`.
The value of the parameter must be greater than 0.
If it is None, it is set to 1.
mean_prior : array-like, shape (n_features,), optional
The prior on the mean distribution (Gaussian).
If it is None, it is set to the mean of X.
degrees_of_freedom_prior : float | None, optional.
The prior of the number of degrees of freedom on the covariance
distributions (Wishart). If it is None, it's set to `n_features`.
covariance_prior : float or array-like, optional
The prior on the covariance distribution (Wishart).
        If it is None, the empirical covariance prior is initialized using the
covariance of X. The shape depends on `covariance_type`::
(n_features, n_features) if 'full',
(n_features, n_features) if 'tied',
(n_features) if 'diag',
float if 'spherical'
random_state : int, RandomState instance or None, optional (default=None)
Controls the random seed given to the method chosen to initialize the
parameters (see `init_params`).
In addition, it controls the generation of random samples from the
fitted distribution (see the method `sample`).
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
warm_start : bool, default to False.
If 'warm_start' is True, the solution of the last fitting is used as
initialization for the next call of fit(). This can speed up
convergence when fit is called several times on similar problems.
See :term:`the Glossary <warm_start>`.
verbose : int, default to 0.
Enable verbose output. If 1 then it prints the current
initialization and each iteration step. If greater than 1 then
it prints also the log probability and the time needed
for each step.
verbose_interval : int, default to 10.
Number of iteration done before the next print.
Attributes
----------
weights_ : array-like, shape (n_components,)
The weights of each mixture components.
means_ : array-like, shape (n_components, n_features)
The mean of each mixture component.
covariances_ : array-like
The covariance of each mixture component.
The shape depends on `covariance_type`::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_ : array-like
The precision matrices for each component in the mixture. A precision
matrix is the inverse of a covariance matrix. A covariance matrix is
symmetric positive definite so the mixture of Gaussian can be
equivalently parameterized by the precision matrices. Storing the
precision matrices instead of the covariance matrices makes it more
efficient to compute the log-likelihood of new samples at test time.
The shape depends on ``covariance_type``::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
precisions_cholesky_ : array-like
The cholesky decomposition of the precision matrices of each mixture
component. A precision matrix is the inverse of a covariance matrix.
A covariance matrix is symmetric positive definite so the mixture of
Gaussian can be equivalently parameterized by the precision matrices.
Storing the precision matrices instead of the covariance matrices makes
it more efficient to compute the log-likelihood of new samples at test
time. The shape depends on ``covariance_type``::
(n_components,) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
n_iter_ : int
Number of step used by the best fit of inference to reach the
convergence.
lower_bound_ : float
Lower bound value on the likelihood (of the training data with
respect to the model) of the best fit of inference.
weight_concentration_prior_ : tuple or float
The dirichlet concentration of each component on the weight
distribution (Dirichlet). The type depends on
``weight_concentration_prior_type``::
(float, float) if 'dirichlet_process' (Beta parameters),
float if 'dirichlet_distribution' (Dirichlet parameters).
The higher concentration puts more mass in
the center and will lead to more components being active, while a lower
concentration parameter will lead to more mass at the edge of the
simplex.
weight_concentration_ : array-like, shape (n_components,)
The dirichlet concentration of each component on the weight
distribution (Dirichlet).
mean_precision_prior_ : float
The precision prior on the mean distribution (Gaussian).
Controls the extent of where means can be placed.
Larger values concentrate the cluster means around `mean_prior`.
If mean_precision_prior is set to None, `mean_precision_prior_` is set
to 1.
mean_precision_ : array-like, shape (n_components,)
The precision of each components on the mean distribution (Gaussian).
mean_prior_ : array-like, shape (n_features,)
The prior on the mean distribution (Gaussian).
degrees_of_freedom_prior_ : float
The prior of the number of degrees of freedom on the covariance
distributions (Wishart).
degrees_of_freedom_ : array-like, shape (n_components,)
The number of degrees of freedom of each components in the model.
covariance_prior_ : float or array-like
The prior on the covariance distribution (Wishart).
The shape depends on `covariance_type`::
(n_features, n_features) if 'full',
(n_features, n_features) if 'tied',
(n_features) if 'diag',
float if 'spherical'
See Also
--------
GaussianMixture : Finite Gaussian mixture fit with EM.
References
----------
.. [1] `Bishop, Christopher M. (2006). "Pattern recognition and machine
learning". Vol. 4 No. 4. New York: Springer.
<https://www.springer.com/kr/book/9780387310732>`_
.. [2] `Hagai Attias. (2000). "A Variational Bayesian Framework for
Graphical Models". In Advances in Neural Information Processing
Systems 12.
<http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.36.2841&rep=rep1&type=pdf>`_
.. [3] `Blei, David M. and Michael I. Jordan. (2006). "Variational
inference for Dirichlet process mixtures". Bayesian analysis 1.1
<https://www.cs.princeton.edu/courses/archive/fall11/cos597C/reading/BleiJordan2005.pdf>`_
"""
    @_deprecate_positional_args
    def __init__(self, *, n_components=1, covariance_type='full', tol=1e-3,
                 reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
                 weight_concentration_prior_type='dirichlet_process',
                 weight_concentration_prior=None,
                 mean_precision_prior=None, mean_prior=None,
                 degrees_of_freedom_prior=None, covariance_prior=None,
                 random_state=None, warm_start=False, verbose=0,
                 verbose_interval=10, min_cluster_size=0.001):
        """Store the hyper-parameters; validation is deferred.

        All arguments are keyword-only (enforced by the bare ``*``); the
        ``_deprecate_positional_args`` decorator presumably warns older
        positional call sites.  The values are only validated later, in
        `_check_parameters`, once the training data is available.
        """
        # Generic EM/mixture options are handled by the base class.
        super().__init__(
            n_components=n_components, tol=tol, reg_covar=reg_covar,
            max_iter=max_iter, n_init=n_init, init_params=init_params,
            random_state=random_state, warm_start=warm_start,
            verbose=verbose, verbose_interval=verbose_interval,
            min_cluster_size=min_cluster_size)
        # Variational-Bayes specific options and priors.
        self.covariance_type = covariance_type
        self.weight_concentration_prior_type = weight_concentration_prior_type
        self.weight_concentration_prior = weight_concentration_prior
        self.mean_precision_prior = mean_precision_prior
        self.mean_prior = mean_prior
        self.degrees_of_freedom_prior = degrees_of_freedom_prior
        self.covariance_prior = covariance_prior
        # NOTE(review): `min_cluster_size` is already forwarded to
        # super().__init__ above, so this assignment looks redundant —
        # confirm whether the base class stores it.
        self.min_cluster_size = min_cluster_size
def _check_parameters(self, X):
"""Check that the parameters are well defined.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
if self.covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError("Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% self.covariance_type)
if (self.weight_concentration_prior_type not in
['dirichlet_process', 'dirichlet_distribution']):
raise ValueError(
"Invalid value for 'weight_concentration_prior_type': %s "
"'weight_concentration_prior_type' should be in "
"['dirichlet_process', 'dirichlet_distribution']"
% self.weight_concentration_prior_type)
self._check_weights_parameters()
self._check_means_parameters(X)
self._check_precision_parameters(X)
self._checkcovariance_prior_parameter(X)
self._check_min_cluster_size_parameter()
def _check_weights_parameters(self):
"""Check the parameter of the Dirichlet distribution."""
if self.weight_concentration_prior is None:
self.weight_concentration_prior_ = 1. / self.n_components
elif self.weight_concentration_prior > 0.:
self.weight_concentration_prior_ = (
self.weight_concentration_prior)
else:
raise ValueError("The parameter 'weight_concentration_prior' "
"should be greater than 0., but got %.3f."
% self.weight_concentration_prior)
def _check_means_parameters(self, X):
"""Check the parameters of the Gaussian distribution.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.mean_precision_prior is None:
self.mean_precision_prior_ = 1.
elif self.mean_precision_prior > 0.:
self.mean_precision_prior_ = self.mean_precision_prior
else:
raise ValueError("The parameter 'mean_precision_prior' should be "
"greater than 0., but got %.3f."
% self.mean_precision_prior)
if self.mean_prior is None:
self.mean_prior_ = X.mean(axis=0)
else:
self.mean_prior_ = check_array(self.mean_prior,
dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(self.mean_prior_, (n_features, ), 'means')
def _check_precision_parameters(self, X):
"""Check the prior parameters of the precision distribution.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.degrees_of_freedom_prior is None:
self.degrees_of_freedom_prior_ = n_features
elif self.degrees_of_freedom_prior > n_features - 1.:
self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior
else:
raise ValueError("The parameter 'degrees_of_freedom_prior' "
"should be greater than %d, but got %.3f."
% (n_features - 1, self.degrees_of_freedom_prior))
def _checkcovariance_prior_parameter(self, X):
"""Check the `covariance_prior_`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
_, n_features = X.shape
if self.covariance_prior is None:
self.covariance_prior_ = {
'full': np.atleast_2d(np.cov(X.T)),
'tied': np.atleast_2d(np.cov(X.T)),
'diag': np.var(X, axis=0, ddof=1),
'spherical': np.var(X, axis=0, ddof=1).mean()
}[self.covariance_type]
elif self.covariance_type in ['full', 'tied']:
self.covariance_prior_ = check_array(
self.covariance_prior, dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(self.covariance_prior_, (n_features, n_features),
'%s covariance_prior' % self.covariance_type)
_check_precision_matrix(self.covariance_prior_,
self.covariance_type)
elif self.covariance_type == 'diag':
self.covariance_prior_ = check_array(
self.covariance_prior, dtype=[np.float64, np.float32],
ensure_2d=False)
_check_shape(self.covariance_prior_, (n_features,),
'%s covariance_prior' % self.covariance_type)
_check_precision_positivity(self.covariance_prior_,
self.covariance_type)
# spherical case
elif self.covariance_prior > 0.:
self.covariance_prior_ = self.covariance_prior
else:
raise ValueError("The parameter 'spherical covariance_prior' "
"should be greater than 0., but got %.3f."
% self.covariance_prior)
def _check_min_cluster_size_parameter(self):
if self.min_cluster_size < 0 or self.min_cluster_size >= 1:
raise ValueError("The parameter 'min_cluster_size' should"
"be a percent in the range [0,1), but got %.3f." %
self.min_cluster_size)
def _initialize(self, X, resp):
"""Initialization of the mixture parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
resp : array-like, shape (n_samples, n_components)
"""
nk, xk, sk = _estimate_gaussian_parameters(X, resp, self.reg_covar,
self.covariance_type)
self._estimate_weights(nk)
self._estimate_means(nk, xk)
self._estimate_precisions(nk, xk, sk)
def _estimate_weights(self, nk):
"""Estimate the parameters of the Dirichlet distribution.
Parameters
----------
nk : array-like, shape (n_components,)
"""
if self.weight_concentration_prior_type == 'dirichlet_process':
# For dirichlet process weight_concentration will be a tuple
# containing the two parameters of the beta distribution
self.weight_concentration_ = (
1. + nk,
(self.weight_concentration_prior_ +
np.hstack((np.cumsum(nk[::-1])[-2::-1], 0))))
else:
# case Variationnal Gaussian mixture with dirichlet distribution
self.weight_concentration_ = self.weight_concentration_prior_ + nk
def _estimate_means(self, nk, xk):
"""Estimate the parameters of the Gaussian distribution.
Parameters
----------
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
"""
self.mean_precision_ = self.mean_precision_prior_ + nk
self.means_ = ((self.mean_precision_prior_ * self.mean_prior_ +
nk[:, np.newaxis] * xk) /
self.mean_precision_[:, np.newaxis])
def _estimate_precisions(self, nk, xk, sk):
"""Estimate the precisions parameters of the precision distribution.
Parameters
----------
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
sk : array-like
The shape depends of `covariance_type`:
'full' : (n_components, n_features, n_features)
'tied' : (n_features, n_features)
'diag' : (n_components, n_features)
'spherical' : (n_components,)
"""
{"full": self._estimate_wishart_full,
"tied": self._estimate_wishart_tied,
"diag": self._estimate_wishart_diag,
"spherical": self._estimate_wishart_spherical
}[self.covariance_type](nk, xk, sk)
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
    def _estimate_wishart_full(self, nk, xk, sk):
        """Estimate the full Wishart distribution parameters.

        Updates ``degrees_of_freedom_`` and ``covariances_`` (one matrix
        per component) from the current sufficient statistics.

        Parameters
        ----------
        nk : array-like, shape (n_components,)
            Effective number of points assigned to each component.
        xk : array-like, shape (n_components, n_features)
            Weighted mean of each component.
        sk : array-like, shape (n_components, n_features, n_features)
            Weighted covariance of each component.
        """
        _, n_features = xk.shape

        # Warning : in some Bishop book, there is a typo on the formula 10.63
        # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` is
        # the correct formula
        self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk

        self.covariances_ = np.empty((self.n_components, n_features,
                                      n_features))

        for k in range(self.n_components):
            # Deviation of the component mean from the prior mean; its
            # outer product shrinks the estimate towards the prior.
            diff = xk[k] - self.mean_prior_
            self.covariances_[k] = (self.covariance_prior_ + nk[k] * sk[k] +
                                    nk[k] * self.mean_precision_prior_ /
                                    self.mean_precision_[k] * np.outer(diff,
                                                                       diff))

        # Contrary to the original bishop book, we normalize the covariances
        self.covariances_ /= (
            self.degrees_of_freedom_[:, np.newaxis, np.newaxis])
    def _estimate_wishart_tied(self, nk, xk, sk):
        """Estimate the tied Wishart distribution parameters.

        A single covariance matrix is shared by all the components.

        Parameters
        ----------
        nk : array-like, shape (n_components,)
            Effective number of points assigned to each component.
        xk : array-like, shape (n_components, n_features)
            Weighted mean of each component.
        sk : array-like, shape (n_features, n_features)
            Shared weighted covariance matrix.
        """
        _, n_features = xk.shape

        # Warning : in some Bishop book, there is a typo on the formula 10.63
        # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
        # is the correct formula
        self.degrees_of_freedom_ = (
            self.degrees_of_freedom_prior_ + nk.sum() / self.n_components)

        # Shrink the shared covariance towards the prior mean via the
        # precision-weighted outer products of the mean deviations.
        diff = xk - self.mean_prior_
        self.covariances_ = (
            self.covariance_prior_ + sk * nk.sum() / self.n_components +
            self.mean_precision_prior_ / self.n_components * np.dot(
                (nk / self.mean_precision_) * diff.T, diff))

        # Contrary to the original bishop book, we normalize the covariances
        self.covariances_ /= self.degrees_of_freedom_
def _estimate_wishart_diag(self, nk, xk, sk):
"""Estimate the diag Wishart distribution parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
sk : array-like, shape (n_components, n_features)
"""
_, n_features = xk.shape
# Warning : in some Bishop book, there is a typo on the formula 10.63
# `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
# is the correct formula
self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
diff = xk - self.mean_prior_
self.covariances_ = (
self.covariance_prior_ + nk[:, np.newaxis] * (
sk + (self.mean_precision_prior_ /
self.mean_precision_)[:, np.newaxis] * np.square(diff)))
# Contrary to the original bishop book, we normalize the covariances
self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis]
def _estimate_wishart_spherical(self, nk, xk, sk):
"""Estimate the spherical Wishart distribution parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
xk : array-like, shape (n_components, n_features)
sk : array-like, shape (n_components,)
"""
_, n_features = xk.shape
# Warning : in some Bishop book, there is a typo on the formula 10.63
# `degrees_of_freedom_k = degrees_of_freedom_0 + Nk`
# is the correct formula
self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk
diff = xk - self.mean_prior_
self.covariances_ = (
self.covariance_prior_ + nk * (
sk + self.mean_precision_prior_ / self.mean_precision_ *
np.mean(np.square(diff), 1)))
# Contrary to the original bishop book, we normalize the covariances
self.covariances_ /= self.degrees_of_freedom_
def _m_step(self, X, log_resp):
"""M step.
Parameters
----------
X : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in X.
"""
n_samples, _ = X.shape
nk, xk, sk = _estimate_gaussian_parameters(
X, np.exp(log_resp), self.reg_covar, self.covariance_type)
self._estimate_weights(nk)
self._estimate_means(nk, xk)
self._estimate_precisions(nk, xk, sk)
def _estimate_log_weights(self):
if self.weight_concentration_prior_type == 'dirichlet_process':
digamma_sum = digamma(self.weight_concentration_[0] +
self.weight_concentration_[1])
digamma_a = digamma(self.weight_concentration_[0])
digamma_b = digamma(self.weight_concentration_[1])
return (digamma_a - digamma_sum +
np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1])))
else:
# case Variationnal Gaussian mixture with dirichlet distribution
return (digamma(self.weight_concentration_) -
digamma(np.sum(self.weight_concentration_)))
    def _estimate_log_prob(self, X):
        """Estimate the per-sample, per-component weighted log probabilities.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        log_prob : array, shape (n_samples, n_components)
        """
        _, n_features = X.shape
        # We remove `n_features * np.log(self.degrees_of_freedom_)` because
        # the precision matrix is normalized
        log_gauss = (_estimate_log_gaussian_prob(
            X, self.means_, self.precisions_cholesky_, self.covariance_type) -
            .5 * n_features * np.log(self.degrees_of_freedom_))

        # Expected log determinant of the precision matrix under the
        # Wishart posterior (sum of digammas plus the log-2 term).
        log_lambda = n_features * np.log(2.) + np.sum(digamma(
            .5 * (self.degrees_of_freedom_ -
                  np.arange(0, n_features)[:, np.newaxis])), 0)

        return log_gauss + .5 * (log_lambda -
                                 n_features / self.mean_precision_)
    def _compute_lower_bound(self, log_resp, log_prob_norm):
        """Estimate the lower bound of the model.

        The lower bound on the likelihood (of the training data with respect
        to the model) is used to detect the convergence and has to increase
        at each iteration.

        Parameters
        ----------
        log_resp : array, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.

        log_prob_norm : float
            Logarithm of the probability of each sample in X.

        Returns
        -------
        lower_bound : float
        """
        # Contrary to the original formula, we have done some simplification
        # and removed all the constant terms.
        n_features, = self.mean_prior_.shape

        # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)`
        # because the precision matrix is normalized.
        log_det_precisions_chol = (_compute_log_det_cholesky(
            self.precisions_cholesky_, self.covariance_type, n_features) -
            .5 * n_features * np.log(self.degrees_of_freedom_))

        if self.covariance_type == 'tied':
            log_wishart = self.n_components * np.float64(_log_wishart_norm(
                self.degrees_of_freedom_, log_det_precisions_chol, n_features))
        else:
            log_wishart = np.sum(_log_wishart_norm(
                self.degrees_of_freedom_, log_det_precisions_chol, n_features))

        if self.weight_concentration_prior_type == 'dirichlet_process':
            log_norm_weight = -np.sum(betaln(self.weight_concentration_[0],
                                             self.weight_concentration_[1]))
        else:
            log_norm_weight = _log_dirichlet_norm(self.weight_concentration_)

        # Entropy of the responsibilities minus the normalization terms of
        # the Wishart, weight and mean distributions.
        return (-np.sum(np.exp(log_resp) * log_resp) -
                log_wishart - log_norm_weight -
                0.5 * n_features * np.sum(np.log(self.mean_precision_)))
def _get_parameters(self):
return (self.weight_concentration_,
self.mean_precision_, self.means_,
self.degrees_of_freedom_, self.covariances_,
self.precisions_cholesky_)
def _set_parameters(self, params):
(self.weight_concentration_, self.mean_precision_, self.means_,
self.degrees_of_freedom_, self.covariances_,
self.precisions_cholesky_) = params
# Weights computation
if self.weight_concentration_prior_type == "dirichlet_process":
weight_dirichlet_sum = (self.weight_concentration_[0] +
self.weight_concentration_[1])
tmp = self.weight_concentration_[1] / weight_dirichlet_sum
self.weights_ = (
self.weight_concentration_[0] / weight_dirichlet_sum *
np.hstack((1, np.cumprod(tmp[:-1]))))
self.weights_ /= np.sum(self.weights_)
else:
self. weights_ = (self.weight_concentration_ /
np.sum(self.weight_concentration_))
# Precisions matrices computation
if self.covariance_type == 'full':
self.precisions_ = np.array([
np.dot(prec_chol, prec_chol.T)
for prec_chol in self.precisions_cholesky_])
elif self.covariance_type == 'tied':
self.precisions_ = np.dot(self.precisions_cholesky_,
self.precisions_cholesky_.T)
else:
self.precisions_ = self.precisions_cholesky_ ** 2
| 43.162673
| 98
| 0.616951
|
e93d65770f0f554b0eead56af918ca135e612609
| 5,495
|
py
|
Python
|
tensorflow/python/keras/utils/version_utils_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 190,993
|
2015-11-09T13:17:30.000Z
|
2022-03-31T23:05:27.000Z
|
tensorflow/python/keras/utils/version_utils_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 48,461
|
2015-11-09T14:21:11.000Z
|
2022-03-31T23:17:33.000Z
|
tensorflow/python/keras/utils/version_utils_test.py
|
EricRemmerswaal/tensorflow
|
141ff27877579c81a213fa113bd1b474c1749aca
|
[
"Apache-2.0"
] | 104,981
|
2015-11-09T13:40:17.000Z
|
2022-03-31T19:51:54.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras utilities to split v1 and v2 classes."""
import abc
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_v1
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine import training_v1
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class SplitUtilsTest(keras_parameterized.TestCase):
  """Checks that Keras routes models/layers to the v1 or v2 base classes."""

  def _check_model_class(self, model_class):
    # The expected base depends on whether we are in v2 (eager) mode.
    expected = (
        training.Model if ops.executing_eagerly_outside_functions()
        else training_v1.Model)
    self.assertEqual(model_class, expected)

  def _check_layer_class(self, layer):
    is_v2 = ops.executing_eagerly_outside_functions()
    if is_v2:
      self.assertIsInstance(layer, base_layer.Layer)
      self.assertNotIsInstance(layer, base_layer_v1.Layer)
    else:
      self.assertIsInstance(layer, base_layer_v1.Layer)

  def test_functional_model(self):
    inp = keras.Input(10)
    out = keras.layers.Dense(1)(inp)
    net = keras.Model(inp, out)
    self._check_model_class(net.__class__.__bases__[0])
    self._check_layer_class(net)

  def test_subclass_model_with_functional_init(self):
    inp = keras.Input(10)
    out = keras.layers.Dense(1)(inp)

    class MyModel(keras.Model):
      pass

    net = MyModel(inp, out)
    base = net.__class__.__bases__[0].__bases__[0]
    self._check_model_class(base)
    self._check_layer_class(net)

  def test_subclass_model_with_functional_init_interleaved_v1_functional(self):
    # Build a throwaway v1 functional model first, then check that a new
    # subclassed model still resolves to the right base classes.
    with ops.Graph().as_default():
      graph_inp = keras.Input(10)
      graph_out = keras.layers.Dense(1)(graph_inp)
      _ = keras.Model(graph_inp, graph_out)

    inp = keras.Input(10)
    out = keras.layers.Dense(1)(inp)

    class MyModel(keras.Model):
      pass

    net = MyModel(inp, out)
    base = net.__class__.__bases__[0].__bases__[0]
    self._check_model_class(base)
    self._check_layer_class(net)

  def test_sequential_model(self):
    net = keras.Sequential([keras.layers.Dense(1)])
    base = net.__class__.__bases__[0].__bases__[0]
    self._check_model_class(base)
    self._check_layer_class(net)

  def test_subclass_model(self):

    class MyModel(keras.Model):

      def call(self, x):
        return 2 * x

    net = MyModel()
    self._check_model_class(net.__class__.__bases__[0])
    self._check_layer_class(net)

  def test_layer(self):

    class IdentityLayer(base_layer.Layer):
      """A layer that returns its input unchanged.

      Useful for testing a layer without a variable.
      """

      def call(self, inputs):
        return inputs

    self._check_layer_class(IdentityLayer())

  def test_multiple_subclass_model(self):

    class Model1(keras.Model):
      pass

    class Model2(Model1):

      def call(self, x):
        return 2 * x

    net = Model2()
    base = net.__class__.__bases__[0].__bases__[0]
    self._check_model_class(base)
    self._check_layer_class(net)

  def test_user_provided_metaclass(self):

    class AbstractModel(keras.Model, metaclass=abc.ABCMeta):

      @abc.abstractmethod
      def call(self, inputs):
        """Calls the model."""

    class MyModel(AbstractModel):

      def call(self, inputs):
        return 2 * inputs

    # The abstract base must stay uninstantiable.
    with self.assertRaisesRegex(TypeError, 'instantiate abstract class'):
      AbstractModel()  # pylint: disable=abstract-class-instantiated

    net = MyModel()
    base = net.__class__.__bases__[0].__bases__[0]
    self._check_model_class(base)
    self._check_layer_class(net)

  def test_multiple_inheritance(self):

    class Return2(object):

      def return_2(self):
        return 2

    class MyModel(keras.Model, Return2):

      def call(self, x):
        return self.return_2() * x

    net = MyModel()
    bases = net.__class__.__bases__
    self._check_model_class(bases[0])
    self.assertEqual(bases[1], Return2)
    self.assertEqual(net.return_2(), 2)
    self._check_layer_class(net)

  def test_fit_error(self):
    if not ops.executing_eagerly_outside_functions():
      # Error only appears on the v2 class.
      return

    net = keras.Sequential([keras.layers.Dense(1)])
    net.compile('sgd', 'mse')
    x, y = np.ones((10, 10)), np.ones((10, 1))
    with ops.get_default_graph().as_default():
      with self.assertRaisesRegex(
          ValueError, 'instance was constructed with eager mode enabled'):
        net.fit(x, y, batch_size=2)
if __name__ == '__main__':
test.main()
| 29.702703
| 80
| 0.709372
|
1b436334bc8e240a39d933c5919a621bb0ee0f80
| 913
|
py
|
Python
|
mediawiki/__init__.py
|
arnavk/mediawiki
|
2f74f80e18489bb3d22de3977fc2a8e5ba2c0737
|
[
"MIT"
] | null | null | null |
mediawiki/__init__.py
|
arnavk/mediawiki
|
2f74f80e18489bb3d22de3977fc2a8e5ba2c0737
|
[
"MIT"
] | 1
|
2018-09-19T18:56:06.000Z
|
2018-09-19T18:56:06.000Z
|
mediawiki/__init__.py
|
arnavk/mediawiki
|
2f74f80e18489bb3d22de3977fc2a8e5ba2c0737
|
[
"MIT"
] | null | null | null |
'''
mediawiki package initialization.

Re-exports the public API (`MediaWiki`, `MediaWikiPage` and the exception
hierarchy) and defines the standard package metadata dunders.
'''
from __future__ import (unicode_literals, absolute_import)
from .mediawiki import (MediaWiki, URL, VERSION)
from .mediawikipage import (MediaWikiPage)
from .exceptions import (MediaWikiException, PageError, MediaWikiGeoCoordError,
                         RedirectError, DisambiguationError,
                         MediaWikiAPIURLError, HTTPTimeoutError,
                         MediaWikiCategoryTreeError)

# Package metadata; version and project URL come from the mediawiki module.
__author__ = 'Tyler Barrus'
__maintainer__ = 'Tyler Barrus'
__email__ = 'barrust@gmail.com'
__license__ = 'MIT'
__version__ = VERSION
__credits__ = ['Jonathan Goldsmith']
__url__ = URL
__bugtrack_url__ = '{0}/issues'.format(URL)

# Names exported by `from mediawiki import *`.
__all__ = ['MediaWiki', 'PageError', 'RedirectError', 'MediaWikiException',
           'DisambiguationError', 'MediaWikiAPIURLError',
           'HTTPTimeoutError', 'MediaWikiGeoCoordError',
           'MediaWikiCategoryTreeError']
| 36.52
| 79
| 0.709748
|
91de5af1966701dd6344a6f0a300ea28d1f41974
| 6,834
|
py
|
Python
|
backend/sources/tests/unit/test_routers/test_form_router.py
|
TeamUnibuc/SmartForms
|
db558b057468dafd3adbb85d6405697f8c7b7342
|
[
"MIT"
] | null | null | null |
backend/sources/tests/unit/test_routers/test_form_router.py
|
TeamUnibuc/SmartForms
|
db558b057468dafd3adbb85d6405697f8c7b7342
|
[
"MIT"
] | 2
|
2022-02-18T09:43:17.000Z
|
2022-03-02T23:18:03.000Z
|
backend/sources/tests/unit/test_routers/test_form_router.py
|
TeamUnibuc/SmartForms
|
db558b057468dafd3adbb85d6405697f8c7b7342
|
[
"MIT"
] | null | null | null |
import base64
import json
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))))
import main
import time
import random
import unittest
from main import init_state
import database
import smart_forms_types
from PyPDF2 import PdfFileReader
from fastapi.testclient import TestClient
import io
def get_generic_form_description():
    """Build a two-question form description used as a test fixture.

    The form contains a free-text question followed by a multiple-choice
    question, can be filled online and does not require sign-in.
    """
    text_question = smart_forms_types.FormTextQuestion(
        title="question_title",
        description="question description",
        maxAnswerLength=12
    )
    choice_question = smart_forms_types.FormMultipleChoiceQuestion(
        title="question2_title",
        description="question 2 description",
        choices=["Yes", "No", "Maybe"]
    )
    return smart_forms_types.FormDescription(
        title="form_title",
        formId="formId",
        description="Description",
        questions=[text_question, choice_question],
        canBeFilledOnline=True,
        needsToBeSignedInToSubmit=False,
        authorEmail=True
    )
class TestFormEndpointNoAuthChecks(unittest.TestCase):
    """Integration tests for the /api/form endpoints with auth disabled."""

    @classmethod
    def setUpClass(cls) -> None:
        """Initialize the app once and disable authentication checks.

        Declared as a proper ``classmethod`` — the original was a plain
        zero-argument function, which only worked because class-attribute
        lookup bypasses binding; it would break if called through an
        instance or via ``super()``.
        """
        init_state()
        # disable authentication checks
        main.routers.AUTHENTICATION_CHECKS = False

    def setUp(self) -> None:
        # Fresh client per test keeps the tests independent.
        self.client = TestClient(main.app)

    def test_preview_endpoint(self):
        """
        Send a correct message and expect to receive a valid answer.
        """
        form = get_generic_form_description()
        response = self.client.post("/api/form/preview", json=form.dict())
        self.assertEqual(response.status_code, 200)
        self.assertTrue("formPdfBase64" in json.loads(response.content))

    def test_create_and_description_endpoint(self):
        """
        Create a form, then fetch its description by id.
        """
        form = get_generic_form_description()
        response = self.client.post("/api/form/create", json=form.dict())
        self.assertEqual(response.status_code, 200)

        content = response.json()
        self.assertTrue("formPdfBase64" in content)
        self.assertTrue("formId" in content)
        form_id = content["formId"]

        # try to get form from /description
        response_description = self.client.get(
            f"/api/form/description/{form_id}"
        )
        self.assertEqual(response_description.status_code, 200)
        extracted_form = smart_forms_types.FormDescription(
            **response_description.json()
        )
        self.assertEqual(extracted_form.formId, form_id)
        self.assertIsNotNone(extracted_form.creationDate)

    def test_list_endpoint(self):
        """
        Creates a form, and expects list to return at least one form.
        """
        form = get_generic_form_description()
        response = self.client.post("/api/form/create", json=form.dict())
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertTrue("formId" in content)

        # try to get form from /list
        response = self.client.post(
            f"/api/form/list",
            json={
                "offset": 0,
                "count": 10,
                "isOwner": False,
            }
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue("forms" in response.json())
        self.assertGreater(len(response.json()["forms"]), 0)

    def test_pdf_endpoint(self):
        """
        Creates a form, and tries to retrieve its rendered PDF.
        """
        form = get_generic_form_description()
        response = self.client.post("/api/form/create", json=form.dict())
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertTrue("formId" in content)
        form_id = content["formId"]

        response = self.client.get(
            f"/api/form/pdf/{form_id}"
        )
        pdf_bytes = base64.b64decode(
            response.json()["formPdfBase64"]
        )

        # try to read content as pdf
        pdf = PdfFileReader(io.BytesIO(pdf_bytes))
        # shouldn't be none
        self.assertIsNotNone(pdf)
        # should have one page
        self.assertEqual(pdf.numPages, 1)
        # should contain the title (form_title)
        self.assertNotEqual(
            pdf.getPage(0).extractText().find("form_title"),
            -1
        )

    def test_delete_endpoint(self):
        """
        Creates a form, and tries to delete it.
        """
        form = get_generic_form_description()
        response = self.client.post("/api/form/create", json=form.dict())
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertTrue("formId" in content)
        form_id = content["formId"]

        response = self.client.delete(
            f"/api/form/delete/{form_id}"
        )
        # no errors deleting the form
        self.assertEqual(response.status_code, 200)

        # try to read the form again
        response = self.client.get(
            f"/api/form/description/{form_id}"
        )
        # NOTE(review): the original comment said "should get NOT FOUND" but
        # the assertion expects 201 — confirm the API's status code for a
        # missing form before changing either.
        self.assertEqual(response.status_code, 201)

    def test_visibility_endpoint(self):
        """
        Creates a form, then updates and re-reads its online-access flags.
        """
        form = get_generic_form_description()
        form.canBeFilledOnline = False
        form.needsToBeSignedInToSubmit = False
        response = self.client.post("/api/form/create", json=form.dict())
        self.assertEqual(response.status_code, 200)
        form_id = response.json()["formId"]

        response = self.client.put(
            f"/api/form/online-access/{form_id}",
            json={
                "canBeFilledOnline": True,
                "needsToBeSignedInToSubmit": True
            }
        )
        # no errors updating the form
        self.assertEqual(response.status_code, 200)

        # try to read the form again
        response = self.client.get(
            f"/api/form/description/{form_id}"
        )
        # should get 200
        self.assertEqual(response.status_code, 200)
        extracted_form = smart_forms_types.FormDescription(
            **response.json()
        )
        # should be the same, except the two updated fields
        self.assertEqual(extracted_form.formId, form_id)
        self.assertTrue(extracted_form.needsToBeSignedInToSubmit)
        self.assertTrue(extracted_form.canBeFilledOnline)
if __name__ == '__main__':
unittest.main()
| 30.783784
| 111
| 0.613257
|
546397f4cae0674d2db8e17766f65e23645c886d
| 6,452
|
py
|
Python
|
test/test_samples.py
|
ricky40403/Fcos_seg
|
e2803108ca0114b906c739aa348e8b28f3c6342c
|
[
"MIT"
] | null | null | null |
test/test_samples.py
|
ricky40403/Fcos_seg
|
e2803108ca0114b906c739aa348e8b28f3c6342c
|
[
"MIT"
] | 1
|
2021-09-29T17:10:01.000Z
|
2021-09-29T17:10:01.000Z
|
test/test_samples.py
|
ricky40403/Fcos_seg
|
e2803108ca0114b906c739aa348e8b28f3c6342c
|
[
"MIT"
] | null | null | null |
import os
import sys
import argparse
import torch
sys.path.append('..')
from Fcos_seg.data.coco_dataset import COCODataset
from Fcos_seg.data.transform import get_transform
from Fcos_seg.data.data_meta import COCO_CLASS_NAME
from Fcos_seg.core.config import get_cfg_defaults
from Fcos_seg.utils.sampler_helper import get_sampler, make_batch_data_sampler
from Fcos_seg.utils.dataset_helper import collate_fn
from Fcos_seg.utils.norm_helper import UnNormalize
from Fcos_seg.loss.fcos_loss import FcosLoss
from Fcos_seg.detector.fcos import FCOS
from PIL import Image, ImageDraw, ImageFont
import torchvision.transforms as transforms
def parse_args(argv=None):
    """Parse command-line arguments for the COCO dataset test.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse; defaults to ``sys.argv[1:]`` when None.
        Added with a backward-compatible default so the parsing logic can
        be exercised without touching the real process arguments.

    Returns
    -------
    tuple
        ``(parser, args)`` — the `argparse.ArgumentParser` itself and the
        parsed `argparse.Namespace`.
    """
    parser = argparse.ArgumentParser(
        description="Test COCO dataset"
    )
    parser.add_argument('data', metavar='DIR',
                        help='path to coco folder')
    return parser, parser.parse_args(argv)
def main():
    """Visual smoke test: draw FCOS target samples for the COCO train split.

    Loads the COCO-2017 train dataset, builds an aspect-grouped batch loader,
    computes FCOS per-level target samples, and saves annotated images under
    ``test_coco_dataset/train_samples``.
    """
    _, args = parse_args()
    cfg = get_cfg_defaults()
    # Standard COCO-2017 directory layout rooted at args.data.
    COCO_train_img_PATH = os.path.join(args.data, "images", "train2017")
    COCO_train_xml_PATH = os.path.join(args.data, "annotations", "instances_train2017.json")
    COCO_val_img_PATH = os.path.join(args.data, "images", "val2017")
    COCO_val_xml_PATH = os.path.join(args.data, "annotations", "instances_val2017.json")
    train_dataset = COCODataset(cfg, COCO_train_xml_PATH, COCO_train_img_PATH, True, transform= get_transform(cfg, train = True))
    # val_dataset = COCODataset(cfg, COCO_val_xml_PATH, COCO_val_img_PATH, True, transform= get_transform(cfg, train = False))
    train_sampler = get_sampler(train_dataset, shuffle = False, distributed = False)
    # val_sampler = get_sampler(val_dataset, shuffle = False, distributed = False)
    batch_per_gpu = cfg.TRAIN.BATCH
    # Group images of similar aspect ratio into the same batch.
    train_sampler = make_batch_data_sampler(train_dataset, train_sampler, aspect_grouping = [1], images_per_batch = batch_per_gpu)
    # val_sampler = make_batch_data_sampler(val_dataset, val_sampler, aspect_grouping = [1], images_per_batch = batch_per_gpu)
    train_output_path = "test_coco_dataset/train_samples"
    # val_output_path = "test_coco_dataset/val"
    print("==> testing output folder for train: {}".format(train_output_path))
    # print("==> testing output folder for val: {}".format(val_output_path))
    if not os.path.exists(train_output_path):
        os.makedirs(train_output_path)
    # if not os.path.exists(val_output_path):
    #     os.makedirs(val_output_path)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_sampler = train_sampler,
                                               num_workers = cfg.TRAIN.WORKER,
                                               collate_fn = collate_fn(cfg))
    # val_loader = torch.utils.data.DataLoader(val_dataset,
    #                                          batch_sampler = val_sampler,
    #                                          num_workers = cfg.TRAIN.WORKER,
    #                                          collate_fn = collate_fn(cfg))
    # only test 100 image
    # NOTE(review): comment above says 100 but iter_num is 10 — confirm intent.
    iter_num = 10
    cur_iter_num = 0
    # Undo the normalization applied by the transform so images are viewable.
    un_norm = UnNormalize(mean = (103.53, 116.28, 123.675), std = (57.375, 57.12, 58.395))
    model = FCOS(cfg)
    test_loss_sample = FcosLoss(cfg)
    # testing train dataset
    # NOTE(review): cur_iter_num is never incremented and the loop body ends
    # with exit(), so effectively only the first batch is processed.
    for imgs, targets, idx in train_loader:
        if cur_iter_num >= iter_num:
            break
        locations = model.get_locations(imgs, targets)
        cls_sample, reg_sample, locations = test_loss_sample.test_target_samples(locations, targets, imgs)
        for l in range(len(locations)):
            cls_sample_per_level = cls_sample[l]
            reg_sample_per_level = reg_sample[l]
            # Blank canvas for plotting this level's raw sampling locations.
            img_per_batch = imgs.tensors[0].clone()
            img_per_batch = img_per_batch * 0
            PIL_image = transforms.ToPILImage()(img_per_batch)
            drawObj = ImageDraw.Draw(PIL_image)
            for loc in locations[l]:
                # 5x5 marker centered on each sampling location.
                drawObj.rectangle((loc[0]-2, loc[1]-2,loc[0]+2, loc[1]+2), outline="red")
            PIL_image.save(os.path.join(train_output_path, "samplelocation_level{}.jpg".format(l)))
            # NOTE(review): len(cls_sample) is the number of levels, but b is
            # used below as a batch index into imgs.tensors — confirm.
            for b in range(len(cls_sample)):
                img_per_batch = imgs.tensors[b].clone()
                img_per_batch = un_norm(img_per_batch)
                im_w, im_h = imgs.image_sizes[b]
                # NOTE(review): slicing [:, :im_w, :im_h] on a CHW tensor
                # crops the height axis with im_w — confirm image_sizes order.
                img_per_batch = img_per_batch[:, :im_w, :im_h]
                # batch_cls_per_level = cls_sample_per_level[b]
                # batch_reg_per_level = reg_sample_per_level[b]
                # Per-level targets are flattened across the batch; slice out
                # this image's span of f_size locations.
                f_size = locations[l].shape[0]
                batch_cls_per_level = cls_sample_per_level[b*f_size:(b+1)*f_size]
                batch_reg_per_level = reg_sample_per_level[b*f_size:(b+1)*f_size,:]
                print(batch_cls_per_level.size())
                print(batch_reg_per_level.size())
                # Positive samples: locations assigned a non-background class.
                pos_inds = torch.nonzero(batch_cls_per_level > 0).squeeze(1)
                PIL_image = transforms.ToPILImage()(img_per_batch)
                drawObj = ImageDraw.Draw(PIL_image)
                for p in pos_inds:
                    # Decode regression distances back to a box.
                    # NOTE(review): channels 0/2 feed x1/y1 and 1/3 feed x2/y2,
                    # implying (left, right, top, bottom) channel order —
                    # confirm against test_target_samples.
                    x1 = (locations[l][p][0] - batch_reg_per_level[p, 0]).cpu()
                    y1 = (locations[l][p][1] - batch_reg_per_level[p, 2]).cpu()
                    x2 = (locations[l][p][0] + batch_reg_per_level[p, 1]).cpu()
                    y2 = (locations[l][p][1] + batch_reg_per_level[p, 3]).cpu()
                    drawObj.rectangle((x1, y1, x2, y2), outline="red")
                    drawObj.text((x1, y1), "Class: {}".format(COCO_CLASS_NAME[int(batch_cls_per_level[p])]), fill="red")
                PIL_image.save(os.path.join(train_output_path, "sample_{}_level{}.jpg".format(idx[b], l)))
        exit()
# Script entry point: run the dataset/target-sample visual check.
if __name__ == "__main__":
    main()
| 36.451977
| 134
| 0.565871
|
cfd5c81386506bd99f8876433d93823b61628a8b
| 920
|
py
|
Python
|
preprocess/geocode.py
|
code-for-magdeburg/history-of-your-house
|
07f0dc04eb0259ad8d9c86034c5c2231a6922f8d
|
[
"MIT"
] | 1
|
2018-04-28T20:24:48.000Z
|
2018-04-28T20:24:48.000Z
|
preprocess/geocode.py
|
code-for-magdeburg/history-of-your-house
|
07f0dc04eb0259ad8d9c86034c5c2231a6922f8d
|
[
"MIT"
] | null | null | null |
preprocess/geocode.py
|
code-for-magdeburg/history-of-your-house
|
07f0dc04eb0259ad8d9c86034c5c2231a6922f8d
|
[
"MIT"
] | null | null | null |
import dataset
import requests
import time
# SQLite store holding the address-history rows to geocode.
# NOTE(review): "databse.db" looks like a typo for "database.db" — confirm
# before renaming, since an existing file may already use this name.
db = dataset.connect('sqlite:///databse.db')
table = db['history']
# MapQuest geocoding API key.
# NOTE(review): hard-coded credential committed to source — consider moving it
# to an environment variable and rotating the key.
key = "XAI2eQAhITGGep9PYpBj7ZQM90NtKSpd"
# Request template: single result, fixed to Magdeburg, Germany; %s is the street.
baseUrl = "http://www.mapquestapi.com/geocoding/v1/address?street=%s&city=Magdeburg&country=Germany&maxResults=1&key=" + key
def geocode(row):
    """Geocode a single history row in place via the MapQuest API.

    Rows that already carry a latitude are skipped. Otherwise the row's
    'anschrift' (address) is looked up, 'lat'/'lng' are stored on the row,
    and the change is persisted keyed by 'id'.

    Args:
        row: a mutable mapping with at least 'id', 'anschrift', 'lat' keys.
    """
    address = row['anschrift']
    # Identity check — `== None` would invoke __eq__ and can misfire on
    # exotic types; `is None` is the correct Python idiom.
    if row['lat'] is None:
        print(address)
        url = baseUrl % address
        response = requests.get(url).json()
        # Only the first (and only, maxResults=1) location is used.
        lat_lng = response['results'][0]['locations'][0]['latLng']
        row['lat'] = lat_lng['lat']
        row['lng'] = lat_lng['lng']
        table.update(row, ['id'])
        # time.sleep(0.5)
    else:
        print(row['lat'])
# Geocode every stored row (list() snapshots the cursor so updates during
# iteration are safe), then export the table to JSON.
for row in list(table.all()):
    geocode(row)
# dataset.freeze(table, format='csv', filename='history.csv')
dataset.freeze(table, format='json', filename='history.json')
| 24.210526
| 124
| 0.620652
|
9e96da72b21911bfe4eb357c52c55d5591ffe000
| 2,017
|
py
|
Python
|
nahash/messages.py
|
jwiggins/nahash
|
51a41ed5e02c4414783ec4e301fb9c12a4a403ae
|
[
"MIT"
] | 3
|
2018-06-13T00:42:14.000Z
|
2021-04-22T00:50:08.000Z
|
nahash/messages.py
|
jwiggins/nahash
|
51a41ed5e02c4414783ec4e301fb9c12a4a403ae
|
[
"MIT"
] | 1
|
2019-11-27T10:04:16.000Z
|
2020-03-03T16:38:09.000Z
|
nahash/messages.py
|
jwiggins/nahash
|
51a41ed5e02c4414783ec4e301fb9c12a4a403ae
|
[
"MIT"
] | null | null | null |
import subprocess
import textwrap
import time
from .tables import MESSAGE_HANDLE_ID, MESSAGE_ROWID, MESSAGE_TEXT
from .util import get_db_conn
WAIT_TIMEOUT = 5.0
def send_message(recipient, text):
    """Send `text` to `recipient` through the macOS Messages app.

    Drives Messages with an inline AppleScript executed by `osascript`,
    resolving the recipient's iMessage buddy from their phone number or
    email. Returns the osascript process exit code.

    Original: https://gist.github.com/tk512/9b578739730eb5b83c7cbf502c38cf3a
    """
    applescript = textwrap.dedent("""
        on run {targetBuddyPhone, targetMessage}
            tell application "Messages"
                set targetService to 1st service whose service type = iMessage
                set targetBuddy to buddy targetBuddyPhone of targetService
                send targetMessage to targetBuddy
            end tell
        end run
    """)
    cmd = ['osascript', '-', recipient.phone_or_email, text]
    # The script is fed on stdin; its run-handler arguments come from argv.
    process = subprocess.Popen(cmd, stdin=subprocess.PIPE)
    process.communicate(applescript.encode('utf8'))
    return process.wait()
def wait_for_next_message(recipients, last_rowid=0):
    """Poll forever until a new message arrives from any of `recipients`.

    Args:
        recipients: a single recipient or a list/tuple of them; each must
            expose `.index` (the Messages handle id).
        last_rowid: only rows with a strictly greater ROWID are reported;
            0 means "baseline on the first row seen, then wait for newer".

    Returns:
        (text, recipient, rowid) of the first new incoming message.
    """
    if not isinstance(recipients, (list, tuple)):
        recipients = (recipients,)
    # Map handle id -> recipient for the return lookup.
    recipients = {r.index: r for r in recipients}
    # Parameterized IN (...) clause: one placeholder per handle. The original
    # interpolated the ids into the SQL string and omitted the space before
    # ORDER BY (only SQLite's tokenization of ')' kept it parseable).
    placeholders = ', '.join('?' for _ in recipients)
    sql = ('SELECT * FROM `message` '
           'WHERE is_from_me=0 AND handle_id IN ({}) '
           'ORDER BY ROWID DESC '
           'LIMIT 1'.format(placeholders))
    params = tuple(recipients)
    connection = get_db_conn()
    with connection:
        c = connection.cursor()
        while True:
            time.sleep(WAIT_TIMEOUT)
            c.execute(sql, params)
            row = c.fetchone()
            # Nothing there yet
            if row is None:
                continue
            if last_rowid == 0:
                # First sighting: record the baseline; only strictly newer
                # rows count as "the next message".
                last_rowid = row[MESSAGE_ROWID]
            elif row[MESSAGE_ROWID] > last_rowid:
                r_index = row[MESSAGE_HANDLE_ID]
                row_id = row[MESSAGE_ROWID]
                return row[MESSAGE_TEXT], recipients[r_index], row_id
| 30.104478
| 76
| 0.627169
|
2f143e59638a462a78589d830766ed148d50e92e
| 1,334
|
py
|
Python
|
clock/contrib/sites/migrations/0001_initial.py
|
mimischi/clock
|
3914da6a48b89cb80ab5205c6ce1c279012472fe
|
[
"MIT"
] | 6
|
2016-03-02T14:20:14.000Z
|
2021-01-04T04:30:27.000Z
|
clock/contrib/sites/migrations/0001_initial.py
|
mimischi/clock
|
3914da6a48b89cb80ab5205c6ce1c279012472fe
|
[
"MIT"
] | 340
|
2015-09-24T15:30:12.000Z
|
2020-03-14T09:05:40.000Z
|
clock/contrib/sites/migrations/0001_initial.py
|
mimischi/clock
|
3914da6a48b89cb80ab5205c6ce1c279012472fe
|
[
"MIT"
] | 4
|
2016-04-04T13:18:18.000Z
|
2018-08-02T02:16:19.000Z
|
# -*- coding: utf-8 -*-
import django.contrib.sites.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for django.contrib.sites: creates the Site model.

    Auto-generated; do not edit once applied to a database.
    """

    dependencies = []
    operations = [
        migrations.CreateModel(
            name="Site",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        primary_key=True,
                        serialize=False,
                        auto_created=True,
                    ),
                ),
                (
                    "domain",
                    models.CharField(
                        verbose_name="domain name",
                        max_length=100,
                        validators=[
                            django.contrib.sites.models._simple_domain_name_validator
                        ],
                    ),
                ),
                ("name", models.CharField(verbose_name="display name", max_length=50)),
            ],
            options={
                "verbose_name_plural": "sites",
                "verbose_name": "site",
                # Keep the historical table name used by contrib.sites.
                "db_table": "django_site",
                "ordering": ("domain",),
            },
            managers=[("objects", django.contrib.sites.models.SiteManager())],
        )
    ]
| 29
| 87
| 0.407046
|
91fab78c4d322247bbc36cb8d278166efcb2710f
| 3,442
|
py
|
Python
|
examples/http/ghcn/get_ghcn.py
|
jjaraalm/hsds
|
936c9033fbbffcfe5093dd55c4f2992458ac2df5
|
[
"Apache-2.0"
] | 1
|
2020-03-12T12:26:26.000Z
|
2020-03-12T12:26:26.000Z
|
examples/http/ghcn/get_ghcn.py
|
jjaraalm/hsds
|
936c9033fbbffcfe5093dd55c4f2992458ac2df5
|
[
"Apache-2.0"
] | null | null | null |
examples/http/ghcn/get_ghcn.py
|
jjaraalm/hsds
|
936c9033fbbffcfe5093dd55c4f2992458ac2df5
|
[
"Apache-2.0"
] | null | null | null |
import sys
import json
import requests
import helper
import config
import hsds_logger as log
def printUsage():
    """Print command-line usage for this script."""
    usage_lines = (
        "Usage: get_ghcn.py [station] [obstype]",
        "   Get ghcn results for given station and obstype",
        "   If command line arguments are not given, default to:",
        "      station=ITE00100554",
        "      obstype=TMAX",
    )
    for line in usage_lines:
        print(line)
def _get_link_id(headers, group_uuid, link_name, missing_msg):
    """Resolve the hard link `link_name` under group `group_uuid` to its id.

    Prints `missing_msg` and exits on 404; prints a generic error and exits
    on any other non-200 status.
    """
    req = helper.getEndpoint() + '/groups/' + group_uuid + "/links/" + link_name
    log.info("Req: " + req)
    rsp = requests.get(req, headers=headers)
    if rsp.status_code == 200:
        rspJson = json.loads(rsp.text)
        return rspJson["link"]["id"]
    if rsp.status_code == 404:
        print(missing_msg)
    else:
        print("unexpected error: ", rsp.status_code)
    sys.exit()


def main():
    """Fetch GHCN observations for one station/obstype and print them sorted.

    Walks the domain's group hierarchy (root -> data -> station -> obstype)
    via links, then prints the observation attributes as date/value pairs.
    """
    domain = "/home/" + config.get("user_name") + "/" + config.get("domain_name")
    print("domain: {}".format(domain) )
    # Defaults, overridable from the command line.
    station = "ITE00100554"
    obstype = "TMAX"
    if len(sys.argv) > 1 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
        printUsage()
        sys.exit(1)
    if len(sys.argv) > 1:
        station = sys.argv[1].upper()
    if len(sys.argv) > 2:
        obstype = sys.argv[2].upper()
    print("Station: ", station)
    print("Obstype: ", obstype)
    headers = helper.getRequestHeaders(domain=domain)

    # Get the root group uuid of the domain. The original crashed with a
    # TypeError on any non-200 here; fail cleanly instead.
    req = helper.getEndpoint() + '/'
    log.info("Req: " + req)
    rsp = requests.get(req, headers=headers)
    if rsp.status_code != 200:
        print("unexpected error: ", rsp.status_code)
        sys.exit()
    root_uuid = json.loads(rsp.text)["root"]
    log.info("root uuid: " + root_uuid)

    # Follow the chain of links; the repeated request/parse/404 pattern is
    # factored into _get_link_id.
    data_uuid = _get_link_id(headers, root_uuid, "data", "Data group not found")
    log.info("data uuid: " + data_uuid)
    station_uuid = _get_link_id(headers, data_uuid, station, "Station not found")
    log.info("station uuid: " + station_uuid)
    obs_uuid = _get_link_id(headers, station_uuid, obstype,
                            "No observation of requested type")
    log.info("obs uuid: " + obs_uuid)

    # Finally fetch the actual observations (stored as group attributes).
    req = helper.getEndpoint() + '/groups/' + obs_uuid + "/attributes?IncludeData=T"
    log.info("Req: " + req)
    rsp = requests.get(req, headers=headers)
    if rsp.status_code != 200:
        print("unexpected error: ", rsp.status_code)
        sys.exit()
    attrs = json.loads(rsp.text)["attributes"]
    print("{} observations".format(len(attrs)))

    # Convert the attribute list to date -> value pairs and print sorted.
    data = {attr["name"]: attr["value"] for attr in attrs}
    for k in sorted(data):
        print("{}: {}".format(k, data[k]))
# NOTE(review): runs unconditionally on import; consider wrapping in an
# `if __name__ == "__main__":` guard.
main()
| 28.92437
| 84
| 0.590354
|
001d5fa7914ac83f78b65f038d6f04de7dcd2f92
| 1,701
|
py
|
Python
|
poem/Poem/poem/migrations/0006_groupofthresholdsprofiles_thresholdsprofiles.py
|
kevangel79/poem-2
|
75cda3cdd302df9c85b963bd91b7ce7182dfa220
|
[
"Apache-2.0"
] | null | null | null |
poem/Poem/poem/migrations/0006_groupofthresholdsprofiles_thresholdsprofiles.py
|
kevangel79/poem-2
|
75cda3cdd302df9c85b963bd91b7ce7182dfa220
|
[
"Apache-2.0"
] | 139
|
2020-04-06T09:22:16.000Z
|
2021-08-02T06:39:22.000Z
|
poem/Poem/poem/migrations/0006_groupofthresholdsprofiles_thresholdsprofiles.py
|
kevangel79/poem-2
|
75cda3cdd302df9c85b963bd91b7ce7182dfa220
|
[
"Apache-2.0"
] | 3
|
2019-07-10T09:37:38.000Z
|
2020-04-02T10:48:38.000Z
|
# Generated by Django 2.2.5 on 2019-11-06 11:16
import django.contrib.auth.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ThresholdsProfiles and GroupOfThresholdsProfiles models.

    Auto-generated by Django 2.2.5; do not edit once applied to a database.
    """

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
        ('poem', '0005_tenanthistory'),
    ]
    operations = [
        migrations.CreateModel(
            name='ThresholdsProfiles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('apiid', models.CharField(max_length=128)),
                ('groupname', models.CharField(default='', max_length=128)),
            ],
            options={
                'permissions': (('thresholdsprofilesown', 'Read/Write/Modify'),),
            },
        ),
        migrations.CreateModel(
            name='GroupOfThresholdsProfiles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=80, unique=True, verbose_name='name')),
                ('permissions', models.ManyToManyField(blank=True, to='auth.Permission', verbose_name='permissions')),
                ('thresholdsprofiles', models.ManyToManyField(blank=True, to='poem.ThresholdsProfiles')),
            ],
            options={
                'verbose_name': 'Group of thresholds profiles',
                'verbose_name_plural': 'Groups of thresholds profiles',
            },
            managers=[
                # Reuse Django's auth GroupManager for name-based lookups.
                ('objects', django.contrib.auth.models.GroupManager()),
            ],
        ),
    ]
| 38.659091
| 118
| 0.573192
|
4f39989fff61d1fa4b37977cfa1bf5bc5a354a41
| 442
|
py
|
Python
|
examples/fetch_tabix/fetch_tabix.py
|
k8iechen/vcfpy
|
89f7097cb5bc7bfec198d8c02fe031b44786b14b
|
[
"MIT"
] | 48
|
2016-09-25T16:07:53.000Z
|
2022-01-19T17:44:46.000Z
|
examples/fetch_tabix/fetch_tabix.py
|
k8iechen/vcfpy
|
89f7097cb5bc7bfec198d8c02fe031b44786b14b
|
[
"MIT"
] | 83
|
2016-09-16T12:20:02.000Z
|
2021-05-12T09:12:10.000Z
|
examples/fetch_tabix/fetch_tabix.py
|
k8iechen/vcfpy
|
89f7097cb5bc7bfec198d8c02fe031b44786b14b
|
[
"MIT"
] | 9
|
2016-10-04T15:10:06.000Z
|
2022-02-08T21:10:51.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vcfpy
# Open input, add FILTER header, and open output file
# NOTE(review): the comment above mentions adding a FILTER header, but no
# header line is added here — confirm against the original example.
# NOTE(review): reader/writer are never closed; consider context managers if
# vcfpy supports them.
reader = vcfpy.Reader.from_path("input.vcf.gz")
writer = vcfpy.Writer.from_path("/dev/stdout", reader.header)
# Fetch region 20:1,110,694-1,230,237. Note that the coordinates
# in the API call are zero-based and describe half-open intervals.
for record in reader.fetch("20", 1110695, 1230237):
    writer.write_record(record)
| 34
| 66
| 0.730769
|
76e57e77adb623fa451995104b08d00acd7bc02e
| 1,319
|
py
|
Python
|
work/PaddleVideo/paddlevideo/modeling/__init__.py
|
zmgaoscut/2021-CCF-BDCI---7-
|
bdf8139d840a31120f62c9f87542076a3db813ac
|
[
"Apache-2.0"
] | 883
|
2020-11-12T11:46:46.000Z
|
2022-03-31T18:27:10.000Z
|
paddlevideo/modeling/__init__.py
|
arkofgalaxy/PaddleVideo
|
64251233c83b7eb681061b454da198a9082309a6
|
[
"Apache-2.0"
] | 233
|
2020-12-09T06:04:59.000Z
|
2022-03-28T08:16:51.000Z
|
paddlevideo/modeling/__init__.py
|
arkofgalaxy/PaddleVideo
|
64251233c83b7eb681061b454da198a9082309a6
|
[
"Apache-2.0"
] | 225
|
2020-11-13T06:21:55.000Z
|
2022-03-31T05:36:11.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .backbones import ResNet
from .builder import (build_backbone, build_head, build_recognizer,
build_localizer, build_loss)
from .heads import BaseHead, TSNHead
from .losses import CrossEntropyLoss
from .framework.recognizers import BaseRecognizer, recognizer2d
from .registry import BACKBONES, HEADS, LOSSES, RECOGNIZERS, LOCALIZERS
from .weight_init import weight_init_
__all__ = [
'BACKBONES',
'HEADS',
'RECOGNIZERS',
'LOCALIZERS',
'LOSSES',
'build_recognizer',
'build_localizer',
'build_head',
'build_backbone',
'build_loss',
'ResNet',
'TSNHead',
'BaseHead',
'BaseRecognizer',
'Recognizer2d',
'CrossEntropyLoss',
]
| 31.404762
| 74
| 0.727066
|
4a5e6dd36ca47bb2b91a79417c7c470af14d5528
| 6,020
|
py
|
Python
|
data/p3BR/R2/benchmark/startQiskit_QC154.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startQiskit_QC154.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startQiskit_QC154.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=28
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR bit strings s and t position-wise; the result string is reversed.

    Iterates over the length of s, so t must be at least as long.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(bits)[::-1]
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of bit strings s and t modulo 2, returned as '0' or '1'.

    Iterates over the length of s, so t must be at least as long.
    """
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the phase-free oracle O_f as an (n+1)-qubit circuit.

    For every n-bit input x with f(x) == "1", the target qubit is flipped:
    X gates map that control pattern to all-ones, the multi-controlled
    Toffoli fires, and the X gates are undone.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    # Enumerate all 2^n classical inputs; exponential, fine for small n.
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build a Bernstein-Vazirani circuit for f on n input qubits.

    The returned circuit has n+1 qubits (last is the oracle target) and n
    classical bits; measurement is left to the caller.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)  # NOTE(review): computed but never used below — confirm intent.
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    # Auto-generated extra gate sequence; the "number=" comments track the
    # order in which the gates were injected by the generator.
    prog.h(input_qubit[1]) # number=1
    prog.cx(input_qubit[0],input_qubit[2]) # number=11
    prog.x(input_qubit[2]) # number=12
    prog.h(input_qubit[2]) # number=25
    prog.cz(input_qubit[0],input_qubit[2]) # number=26
    prog.h(input_qubit[2]) # number=27
    prog.h(input_qubit[1]) # number=7
    prog.cz(input_qubit[2],input_qubit[1]) # number=8
    prog.h(input_qubit[1]) # number=9
    prog.h(input_qubit[1]) # number=18
    prog.cz(input_qubit[2],input_qubit[1]) # number=19
    prog.h(input_qubit[1]) # number=20
    prog.y(input_qubit[1]) # number=14
    prog.h(input_qubit[1]) # number=22
    prog.cz(input_qubit[2],input_qubit[1]) # number=23
    prog.h(input_qubit[1]) # number=24
    prog.z(input_qubit[2]) # number=3
    prog.x(input_qubit[1]) # number=17
    prog.y(input_qubit[2]) # number=5
    prog.x(input_qubit[2]) # number=21
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate `prog` and return {"|bits>": amplitude} for every basis state."""
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    # Infer the qubit count from the statevector length (2^qubits entries).
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run `prog` on the named Aer backend and bundle results for inspection.

    Returns a dict with the measurement counts, the ideal statevector, the
    most frequent measured bitstring (reversed, as `a`) and the bias `b`.
    """
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Most common bitstring, reversed to match the bit order used by the
    # bitwise helper functions.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    return bitwise_xor(bitwise_dot("111", rep), "1")
# Script entry point: run Bernstein-Vazirani (hidden string a="11", bias
# b="1") on the real ibmq_5_yorktown backend and log counts/depth to CSV.
if __name__ == "__main__":
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit_QC154.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Requires stored IBM Quantum credentials.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_5_yorktown")
    # Transpile against a fake Yorktown target, then run on the real device.
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 29.950249
| 140
| 0.631561
|
7618c197413d6dbde4c9b102fff4171ca1b06f72
| 2,307
|
py
|
Python
|
tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
|
PowerOlive/mindspore
|
bda20724a94113cedd12c3ed9083141012da1f15
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
tests/ut/python/parallel/test_auto_parallel_matmul_drop.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss
grad_all = C.GradOperation(get_all=True)
class NetWithLoss(nn.Cell):
    """Wrap a network so its forward output is fed through a VirtualLoss."""

    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y, b):
        return self.loss(self.network(x, y, b))
class GradWrap(nn.Cell):
    """Wrap a network to return gradients w.r.t. all of its inputs."""

    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y, b):
        grad_fn = grad_all(self.network)
        return grad_fn(x, y, b)
# model_parallel test
def test_two_matmul_dropout():
    """Compile a matmul -> dropout -> matmul net under auto_parallel.

    Builds the grad-wrapped loss net, enables auto-parallel over 8 devices,
    and compiles it; success of the compile (strategy search) is the test.
    """
    class Net(nn.Cell):
        # Two matmuls with a dropout in between.
        def __init__(self):
            super().__init__()
            self.matmul1 = P.MatMul()
            self.dropout = nn.Dropout()
            self.matmul2 = P.MatMul()

        def construct(self, x, y, b):
            out = self.matmul1(x, y)
            out = self.dropout(out)
            out = self.matmul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    net = GradWrap(NetWithLoss(Net()))
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    net.set_auto_parallel()
    # Shapes chosen so both matmuls are well-formed: (128,32)x(32,64)x(64,64).
    x = Tensor(np.ones([128, 32]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)
    net.set_train()
    # Compile only; no execution is performed.
    _cell_graph_executor.compile(net, x, y, b)
| 30.76
| 74
| 0.683572
|
2c24708a3bffb2bd86287214dbc7b1011691595e
| 60,660
|
py
|
Python
|
django/test/testcases.py
|
hashlash/django
|
3baf92cf8230ad3a932986170fd07c8feae7ff2f
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 5
|
2020-07-04T09:43:05.000Z
|
2022-02-23T12:22:24.000Z
|
django/test/testcases.py
|
Warlockk/django
|
e12fea24f06f8911ddc2af1d6cbfb1adb529c1f2
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null |
django/test/testcases.py
|
Warlockk/django
|
e12fea24f06f8911ddc2af1d6cbfb1adb529c1f2
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 1
|
2020-06-24T10:24:20.000Z
|
2020-06-24T10:24:20.000Z
|
import asyncio
import difflib
import json
import posixpath
import sys
import threading
import unittest
from collections import Counter
from contextlib import contextmanager
from copy import copy
from difflib import get_close_matches
from functools import wraps
from unittest.suite import _DebugResult
from unittest.util import safe_repr
from urllib.parse import (
parse_qsl, unquote, urlencode, urljoin, urlparse, urlsplit, urlunparse,
)
from urllib.request import url2pathname
from asgiref.sync import async_to_sync
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils.functional import classproperty
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
    """
    Put value into a list if it's not already one. Return an empty list if
    value is None.
    """
    if value is None:
        return []
    if isinstance(value, list):
        return value
    return [value]
def assert_and_parse_html(self, html, user_msg, msg):
    """Parse `html` into a comparable DOM, failing the test on parse errors."""
    try:
        return parse_html(html)
    except HTMLParseError as err:
        # Combine the caller's message with the parser's diagnostics.
        self.fail(self._formatMessage(user_msg, '%s\n%s' % (msg, err)))
class _AssertNumQueriesContext(CaptureQueriesContext):
    """Context manager asserting exactly `num` queries ran on `connection`.

    On exit, compares the number of captured queries against the expectation
    and fails the test with the captured SQL listed on mismatch.
    """

    def __init__(self, test_case, num, connection):
        self.test_case = test_case
        self.num = num
        super().__init__(connection)

    def __exit__(self, exc_type, exc_value, traceback):
        super().__exit__(exc_type, exc_value, traceback)
        # Don't mask an in-flight exception with an assertion failure.
        if exc_type is not None:
            return
        executed = len(self)
        self.test_case.assertEqual(
            executed, self.num,
            "%d queries executed, %d expected\nCaptured queries were:\n%s" % (
                executed, self.num,
                '\n'.join(
                    '%d. %s' % (i, query['sql']) for i, query in enumerate(self.captured_queries, start=1)
                )
            )
        )
class _AssertTemplateUsedContext:
    """Context manager that records rendered templates and asserts on exit
    that `template_name` was rendered inside the `with` block.
    """

    def __init__(self, test_case, template_name):
        self.test_case = test_case
        self.template_name = template_name
        self.rendered_templates = []
        self.rendered_template_names = []
        self.context = ContextList()

    def on_template_render(self, sender, signal, template, context, **kwargs):
        # Signal receiver: capture every rendered template and its context.
        self.rendered_templates.append(template)
        self.rendered_template_names.append(template.name)
        self.context.append(copy(context))

    def test(self):
        # Overridden by the "not used" subclass to invert the check.
        return self.template_name in self.rendered_template_names

    def message(self):
        return '%s was not rendered.' % self.template_name

    def __enter__(self):
        template_rendered.connect(self.on_template_render)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        template_rendered.disconnect(self.on_template_render)
        # Don't mask an in-flight exception with an assertion failure.
        if exc_type is not None:
            return
        if not self.test():
            message = self.message()
            if self.rendered_templates:
                message += ' Following templates were rendered: %s' % (
                    ', '.join(self.rendered_template_names)
                )
            else:
                message += ' No template was rendered.'
            self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
    """Inverted variant: assert `template_name` was NOT rendered."""

    def test(self):
        return self.template_name not in self.rendered_template_names

    def message(self):
        return '%s was rendered.' % self.template_name
class _DatabaseFailure:
    """Callable stand-in for a connection method on a disallowed database.

    Keeps the wrapped method (so it can be restored later) and raises an
    AssertionError with `message` whenever the method is invoked.
    """

    def __init__(self, wrapped, message):
        self.wrapped = wrapped
        self.message = message

    def __call__(self):
        raise AssertionError(self.message)
class SimpleTestCase(unittest.TestCase):
    """TestCase with Django-specific assertions and no database access.

    Database connections are blocked for any alias not listed in
    ``databases``; class-level settings overrides and a Django test client
    are wired in automatically.
    """
    # The class we'll use for the test client self.client.
    # Can be overridden in derived classes.
    client_class = Client
    # Settings applied for the whole class via override_settings().
    _overridden_settings = None
    # Settings applied for the whole class via modify_settings().
    _modified_settings = None
    # Database aliases this test class is allowed to use (none by default).
    databases = set()
    _disallowed_database_msg = (
        'Database %(operation)s to %(alias)r are not allowed in SimpleTestCase '
        'subclasses. Either subclass TestCase or TransactionTestCase to ensure '
        'proper test isolation or add %(alias)r to %(test)s.databases to silence '
        'this failure.'
    )
    # Connection methods patched out for disallowed aliases, paired with the
    # kind of operation they represent (interpolated into the message above).
    _disallowed_connection_methods = [
        ('connect', 'connections'),
        ('temporary_connection', 'connections'),
        ('cursor', 'queries'),
        ('chunked_cursor', 'queries'),
    ]
    @classmethod
    def setUpClass(cls):
        """Enable class-level settings overrides and block disallowed DBs."""
        super().setUpClass()
        if cls._overridden_settings:
            cls._cls_overridden_context = override_settings(**cls._overridden_settings)
            cls._cls_overridden_context.enable()
        if cls._modified_settings:
            cls._cls_modified_context = modify_settings(cls._modified_settings)
            cls._cls_modified_context.enable()
        cls._add_databases_failures()
    @classmethod
    def _validate_databases(cls):
        """Resolve cls.databases to a frozenset of valid connection aliases.

        Raise ImproperlyConfigured for aliases missing from
        settings.DATABASES, suggesting close matches where possible.
        """
        if cls.databases == '__all__':
            return frozenset(connections)
        for alias in cls.databases:
            if alias not in connections:
                message = '%s.%s.databases refers to %r which is not defined in settings.DATABASES.' % (
                    cls.__module__,
                    cls.__qualname__,
                    alias,
                )
                close_matches = get_close_matches(alias, list(connections))
                if close_matches:
                    message += ' Did you mean %r?' % close_matches[0]
                raise ImproperlyConfigured(message)
        return frozenset(cls.databases)
    @classmethod
    def _add_databases_failures(cls):
        """Patch connection methods of disallowed aliases to fail the test."""
        cls.databases = cls._validate_databases()
        for alias in connections:
            if alias in cls.databases:
                continue
            connection = connections[alias]
            for name, operation in cls._disallowed_connection_methods:
                message = cls._disallowed_database_msg % {
                    'test': '%s.%s' % (cls.__module__, cls.__qualname__),
                    'alias': alias,
                    'operation': operation,
                }
                method = getattr(connection, name)
                setattr(connection, name, _DatabaseFailure(method, message))
    @classmethod
    def _remove_databases_failures(cls):
        """Restore connection methods patched by _add_databases_failures()."""
        for alias in connections:
            if alias in cls.databases:
                continue
            connection = connections[alias]
            for name, _ in cls._disallowed_connection_methods:
                method = getattr(connection, name)
                # method is a _DatabaseFailure; .wrapped is the original.
                setattr(connection, name, method.wrapped)
    @classmethod
    def tearDownClass(cls):
        """Undo everything done in setUpClass(), in reverse order."""
        cls._remove_databases_failures()
        if hasattr(cls, '_cls_modified_context'):
            cls._cls_modified_context.disable()
            delattr(cls, '_cls_modified_context')
        if hasattr(cls, '_cls_overridden_context'):
            cls._cls_overridden_context.disable()
            delattr(cls, '_cls_overridden_context')
        super().tearDownClass()
    def __call__(self, result=None):
        """
        Wrapper around default __call__ method to perform common Django test
        set up. This means that user-defined Test Cases aren't required to
        include a call to super().setUp().
        """
        self._setup_and_call(result)
    def debug(self):
        """Perform the same as __call__(), without catching the exception."""
        debug_result = _DebugResult()
        self._setup_and_call(debug_result, debug=True)
    def _setup_and_call(self, result, debug=False):
        """
        Perform the following in order: pre-setup, run test, post-teardown,
        skipping pre/post hooks if test is set to be skipped.
        If debug=True, reraise any errors in setup and use super().debug()
        instead of __call__() to run the test.
        """
        testMethod = getattr(self, self._testMethodName)
        skipped = (
            getattr(self.__class__, "__unittest_skip__", False) or
            getattr(testMethod, "__unittest_skip__", False)
        )
        # Convert async test methods.
        if asyncio.iscoroutinefunction(testMethod):
            setattr(self, self._testMethodName, async_to_sync(testMethod))
        if not skipped:
            try:
                self._pre_setup()
            except Exception:
                if debug:
                    raise
                result.addError(self, sys.exc_info())
                return
        if debug:
            super().debug()
        else:
            super().__call__(result)
        if not skipped:
            try:
                self._post_teardown()
            except Exception:
                if debug:
                    raise
                result.addError(self, sys.exc_info())
                return
    def _pre_setup(self):
        """
        Perform pre-test setup:
        * Create a test client.
        * Clear the mail test outbox.
        """
        self.client = self.client_class()
        mail.outbox = []
    def _post_teardown(self):
        """Perform post-test things."""
        pass
    def settings(self, **kwargs):
        """
        A context manager that temporarily sets a setting and reverts to the
        original value when exiting the context.
        """
        return override_settings(**kwargs)
    def modify_settings(self, **kwargs):
        """
        A context manager that temporarily applies changes a list setting and
        reverts back to the original value when exiting the context.
        """
        return modify_settings(**kwargs)
    def assertRedirects(self, response, expected_url, status_code=302,
                        target_status_code=200, msg_prefix='',
                        fetch_redirect_response=True):
        """
        Assert that a response redirected to a specific URL and that the
        redirect URL can be loaded.
        Won't work for external links since it uses the test client to do a
        request (use fetch_redirect_response=False to check such links without
        fetching them).
        """
        if msg_prefix:
            msg_prefix += ": "
        if hasattr(response, 'redirect_chain'):
            # The request was a followed redirect
            self.assertTrue(
                response.redirect_chain,
                msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
                % (response.status_code, status_code)
            )
            self.assertEqual(
                response.redirect_chain[0][1], status_code,
                msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)"
                % (response.redirect_chain[0][1], status_code)
            )
            url, status_code = response.redirect_chain[-1]
            scheme, netloc, path, query, fragment = urlsplit(url)
            self.assertEqual(
                response.status_code, target_status_code,
                msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)"
                % (response.status_code, target_status_code)
            )
        else:
            # Not a followed redirect
            self.assertEqual(
                response.status_code, status_code,
                msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
                % (response.status_code, status_code)
            )
            url = response.url
            scheme, netloc, path, query, fragment = urlsplit(url)
            # Prepend the request path to handle relative path redirects.
            if not path.startswith('/'):
                url = urljoin(response.request['PATH_INFO'], url)
                path = urljoin(response.request['PATH_INFO'], path)
            if fetch_redirect_response:
                # netloc might be empty, or in cases where Django tests the
                # HTTP scheme, the convention is for netloc to be 'testserver'.
                # Trust both as "internal" URLs here.
                domain, port = split_domain_port(netloc)
                if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
                    raise ValueError(
                        "The test client is unable to fetch remote URLs (got %s). "
                        "If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
                        "Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
                        % (url, domain)
                    )
                # Get the redirection page, using the same client that was used
                # to obtain the original response.
                extra = response.client.extra or {}
                redirect_response = response.client.get(
                    path,
                    QueryDict(query),
                    secure=(scheme == 'https'),
                    **extra,
                )
                self.assertEqual(
                    redirect_response.status_code, target_status_code,
                    msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)"
                    % (path, redirect_response.status_code, target_status_code)
                )
        self.assertURLEqual(
            url, expected_url,
            msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)
        )
    def assertURLEqual(self, url1, url2, msg_prefix=''):
        """
        Assert that two URLs are the same, ignoring the order of query string
        parameters except for parameters with the same name.
        For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but
        /path/?a=1&a=2 isn't equal to /path/?a=2&a=1.
        """
        def normalize(url):
            """Sort the URL's query string parameters."""
            url = str(url)  # Coerce reverse_lazy() URLs.
            scheme, netloc, path, params, query, fragment = urlparse(url)
            query_parts = sorted(parse_qsl(query))
            return urlunparse((scheme, netloc, path, params, urlencode(query_parts), fragment))
        self.assertEqual(
            normalize(url1), normalize(url2),
            msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2)
        )
    def _assert_contains(self, response, text, status_code, msg_prefix, html):
        """Shared implementation for assertContains()/assertNotContains().

        Return (text_repr, real_count, msg_prefix) where real_count is how
        many times ``text`` occurs in the (possibly HTML-parsed) content.
        """
        # If the response supports deferred rendering and hasn't been rendered
        # yet, then ensure that it does get rendered before proceeding further.
        if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
            response.render()
        if msg_prefix:
            msg_prefix += ": "
        self.assertEqual(
            response.status_code, status_code,
            msg_prefix + "Couldn't retrieve content: Response code was %d"
            " (expected %d)" % (response.status_code, status_code)
        )
        if response.streaming:
            content = b''.join(response.streaming_content)
        else:
            content = response.content
        if not isinstance(text, bytes) or html:
            # Compare as text; decode the body with the response charset.
            text = str(text)
            content = content.decode(response.charset)
            text_repr = "'%s'" % text
        else:
            text_repr = repr(text)
        if html:
            content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
            text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
        real_count = content.count(text)
        return (text_repr, real_count, msg_prefix)
    def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
        """
        Assert that a response indicates that some content was retrieved
        successfully, (i.e., the HTTP status code was as expected) and that
        ``text`` occurs ``count`` times in the content of the response.
        If ``count`` is None, the count doesn't matter - the assertion is true
        if the text occurs at least once in the response.
        """
        text_repr, real_count, msg_prefix = self._assert_contains(
            response, text, status_code, msg_prefix, html)
        if count is not None:
            self.assertEqual(
                real_count, count,
                msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
            )
        else:
            self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)
    def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
        """
        Assert that a response indicates that some content was retrieved
        successfully, (i.e., the HTTP status code was as expected) and that
        ``text`` doesn't occur in the content of the response.
        """
        text_repr, real_count, msg_prefix = self._assert_contains(
            response, text, status_code, msg_prefix, html)
        self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)
    def assertFormError(self, response, form, field, errors, msg_prefix=''):
        """
        Assert that a form used to render the response has a specific field
        error.
        """
        if msg_prefix:
            msg_prefix += ": "
        # Put context(s) into a list to simplify processing.
        contexts = to_list(response.context)
        if not contexts:
            self.fail(msg_prefix + "Response did not use any contexts to render the response")
        # Put error(s) into a list to simplify processing.
        errors = to_list(errors)
        # Search all contexts for the error.
        found_form = False
        for i, context in enumerate(contexts):
            if form not in context:
                continue
            found_form = True
            for err in errors:
                if field:
                    if field in context[form].errors:
                        field_errors = context[form].errors[field]
                        self.assertTrue(
                            err in field_errors,
                            msg_prefix + "The field '%s' on form '%s' in"
                            " context %d does not contain the error '%s'"
                            " (actual errors: %s)" %
                            (field, form, i, err, repr(field_errors))
                        )
                    elif field in context[form].fields:
                        self.fail(
                            msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" %
                            (field, form, i)
                        )
                    else:
                        self.fail(
                            msg_prefix + "The form '%s' in context %d does not contain the field '%s'" %
                            (form, i, field)
                        )
                else:
                    non_field_errors = context[form].non_field_errors()
                    self.assertTrue(
                        err in non_field_errors,
                        msg_prefix + "The form '%s' in context %d does not"
                        " contain the non-field error '%s'"
                        " (actual errors: %s)" %
                        (form, i, err, non_field_errors or 'none')
                    )
        if not found_form:
            self.fail(msg_prefix + "The form '%s' was not used to render the response" % form)
    def assertFormsetError(self, response, formset, form_index, field, errors,
                           msg_prefix=''):
        """
        Assert that a formset used to render the response has a specific error.
        For field errors, specify the ``form_index`` and the ``field``.
        For non-field errors, specify the ``form_index`` and the ``field`` as
        None.
        For non-form errors, specify ``form_index`` as None and the ``field``
        as None.
        """
        # Add punctuation to msg_prefix
        if msg_prefix:
            msg_prefix += ": "
        # Put context(s) into a list to simplify processing.
        contexts = to_list(response.context)
        if not contexts:
            self.fail(msg_prefix + 'Response did not use any contexts to '
                      'render the response')
        # Put error(s) into a list to simplify processing.
        errors = to_list(errors)
        # Search all contexts for the error.
        found_formset = False
        for i, context in enumerate(contexts):
            if formset not in context:
                continue
            found_formset = True
            for err in errors:
                if field is not None:
                    if field in context[formset].forms[form_index].errors:
                        field_errors = context[formset].forms[form_index].errors[field]
                        self.assertTrue(
                            err in field_errors,
                            msg_prefix + "The field '%s' on formset '%s', "
                            "form %d in context %d does not contain the "
                            "error '%s' (actual errors: %s)" %
                            (field, formset, form_index, i, err, repr(field_errors))
                        )
                    elif field in context[formset].forms[form_index].fields:
                        self.fail(
                            msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors"
                            % (field, formset, form_index, i)
                        )
                    else:
                        self.fail(
                            msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'"
                            % (formset, form_index, i, field)
                        )
                elif form_index is not None:
                    non_field_errors = context[formset].forms[form_index].non_field_errors()
                    self.assertFalse(
                        not non_field_errors,
                        msg_prefix + "The formset '%s', form %d in context %d "
                        "does not contain any non-field errors." % (formset, form_index, i)
                    )
                    self.assertTrue(
                        err in non_field_errors,
                        msg_prefix + "The formset '%s', form %d in context %d "
                        "does not contain the non-field error '%s' (actual errors: %s)"
                        % (formset, form_index, i, err, repr(non_field_errors))
                    )
                else:
                    non_form_errors = context[formset].non_form_errors()
                    self.assertFalse(
                        not non_form_errors,
                        msg_prefix + "The formset '%s' in context %d does not "
                        "contain any non-form errors." % (formset, i)
                    )
                    self.assertTrue(
                        err in non_form_errors,
                        msg_prefix + "The formset '%s' in context %d does not "
                        "contain the non-form error '%s' (actual errors: %s)"
                        % (formset, i, err, repr(non_form_errors))
                    )
        if not found_formset:
            self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset)
    def _assert_template_used(self, response, template_name, msg_prefix):
        """Shared argument handling for assertTemplateUsed()/NotUsed().

        Return (context_mgr_template, template_names, msg_prefix); exactly
        one of the first two is non-None, signalling context-manager use
        versus a direct assertion against a test-client response.
        """
        if response is None and template_name is None:
            raise TypeError('response and/or template_name argument must be provided')
        if msg_prefix:
            msg_prefix += ": "
        if template_name is not None and response is not None and not hasattr(response, 'templates'):
            raise ValueError(
                "assertTemplateUsed() and assertTemplateNotUsed() are only "
                "usable on responses fetched using the Django test Client."
            )
        if not hasattr(response, 'templates') or (response is None and template_name):
            if response:
                # A single positional argument that isn't a response is the
                # template name itself.
                template_name = response
                response = None
            # use this template with context manager
            return template_name, None, msg_prefix
        template_names = [t.name for t in response.templates if t.name is not None]
        return None, template_names, msg_prefix
    def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
        """
        Assert that the template with the provided name was used in rendering
        the response. Also usable as context manager.
        """
        context_mgr_template, template_names, msg_prefix = self._assert_template_used(
            response, template_name, msg_prefix)
        if context_mgr_template:
            # Use assertTemplateUsed as context manager.
            return _AssertTemplateUsedContext(self, context_mgr_template)
        if not template_names:
            self.fail(msg_prefix + "No templates used to render the response")
        self.assertTrue(
            template_name in template_names,
            msg_prefix + "Template '%s' was not a template used to render"
            " the response. Actual template(s) used: %s"
            % (template_name, ', '.join(template_names))
        )
        if count is not None:
            self.assertEqual(
                template_names.count(template_name), count,
                msg_prefix + "Template '%s' was expected to be rendered %d "
                "time(s) but was actually rendered %d time(s)."
                % (template_name, count, template_names.count(template_name))
            )
    def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
        """
        Assert that the template with the provided name was NOT used in
        rendering the response. Also usable as context manager.
        """
        context_mgr_template, template_names, msg_prefix = self._assert_template_used(
            response, template_name, msg_prefix
        )
        if context_mgr_template:
            # Use assertTemplateNotUsed as context manager.
            return _AssertTemplateNotUsedContext(self, context_mgr_template)
        self.assertFalse(
            template_name in template_names,
            msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name
        )
    @contextmanager
    def _assert_raises_or_warns_cm(self, func, cm_attr, expected_exception, expected_message):
        """Wrap assertRaises()/assertWarns() and also check the message."""
        with func(expected_exception) as cm:
            yield cm
        self.assertIn(expected_message, str(getattr(cm, cm_attr)))
    def _assertFooMessage(self, func, cm_attr, expected_exception, expected_message, *args, **kwargs):
        """Common driver for assertRaisesMessage()/assertWarnsMessage()."""
        callable_obj = None
        if args:
            callable_obj, *args = args
        cm = self._assert_raises_or_warns_cm(func, cm_attr, expected_exception, expected_message)
        # Assertion used in context manager fashion.
        if callable_obj is None:
            return cm
        # Assertion was passed a callable.
        with cm:
            callable_obj(*args, **kwargs)
    def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
        """
        Assert that expected_message is found in the message of a raised
        exception.
        Args:
            expected_exception: Exception class expected to be raised.
            expected_message: expected error message string value.
            args: Function to be called and extra positional args.
            kwargs: Extra kwargs.
        """
        return self._assertFooMessage(
            self.assertRaises, 'exception', expected_exception, expected_message,
            *args, **kwargs
        )
    def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs):
        """
        Same as assertRaisesMessage but for assertWarns() instead of
        assertRaises().
        """
        return self._assertFooMessage(
            self.assertWarns, 'warning', expected_warning, expected_message,
            *args, **kwargs
        )
    def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
                          field_kwargs=None, empty_value=''):
        """
        Assert that a form field behaves correctly with various inputs.
        Args:
            fieldclass: the class of the field to be tested.
            valid: a dictionary mapping valid inputs to their expected
                cleaned values.
            invalid: a dictionary mapping invalid inputs to one or more
                raised error messages.
            field_args: the args passed to instantiate the field
            field_kwargs: the kwargs passed to instantiate the field
            empty_value: the expected clean output for inputs in empty_values
        """
        if field_args is None:
            field_args = []
        if field_kwargs is None:
            field_kwargs = {}
        required = fieldclass(*field_args, **field_kwargs)
        optional = fieldclass(*field_args, **{**field_kwargs, 'required': False})
        # test valid inputs
        for input, output in valid.items():
            self.assertEqual(required.clean(input), output)
            self.assertEqual(optional.clean(input), output)
        # test invalid inputs
        for input, errors in invalid.items():
            with self.assertRaises(ValidationError) as context_manager:
                required.clean(input)
            self.assertEqual(context_manager.exception.messages, errors)
            with self.assertRaises(ValidationError) as context_manager:
                optional.clean(input)
            self.assertEqual(context_manager.exception.messages, errors)
        # test required inputs
        error_required = [required.error_messages['required']]
        for e in required.empty_values:
            with self.assertRaises(ValidationError) as context_manager:
                required.clean(e)
            self.assertEqual(context_manager.exception.messages, error_required)
            self.assertEqual(optional.clean(e), empty_value)
        # test that max_length and min_length are always accepted
        if issubclass(fieldclass, CharField):
            field_kwargs.update({'min_length': 2, 'max_length': 20})
            self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
    def assertHTMLEqual(self, html1, html2, msg=None):
        """
        Assert that two HTML snippets are semantically the same.
        Whitespace in most cases is ignored, and attribute ordering is not
        significant. The arguments must be valid HTML.
        """
        dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
        dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
        if dom1 != dom2:
            standardMsg = '%s != %s' % (
                safe_repr(dom1, True), safe_repr(dom2, True))
            diff = ('\n' + '\n'.join(difflib.ndiff(
                str(dom1).splitlines(), str(dom2).splitlines(),
            )))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))
    def assertHTMLNotEqual(self, html1, html2, msg=None):
        """Assert that two HTML snippets are not semantically equivalent."""
        dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
        dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
        if dom1 == dom2:
            standardMsg = '%s == %s' % (
                safe_repr(dom1, True), safe_repr(dom2, True))
            self.fail(self._formatMessage(msg, standardMsg))
    def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
        """Assert that the HTML fragment ``needle`` occurs in ``haystack``,
        exactly ``count`` times if ``count`` is given, else at least once.
        """
        needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:')
        haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:')
        real_count = haystack.count(needle)
        if count is not None:
            self.assertEqual(
                real_count, count,
                msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count)
            )
        else:
            self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle)
    def assertJSONEqual(self, raw, expected_data, msg=None):
        """
        Assert that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply as the heavyweight
        is delegated to the json library.
        """
        try:
            data = json.loads(raw)
        except json.JSONDecodeError:
            self.fail("First argument is not valid JSON: %r" % raw)
        if isinstance(expected_data, str):
            try:
                expected_data = json.loads(expected_data)
            except ValueError:
                self.fail("Second argument is not valid JSON: %r" % expected_data)
        self.assertEqual(data, expected_data, msg=msg)
    def assertJSONNotEqual(self, raw, expected_data, msg=None):
        """
        Assert that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply as the heavyweight
        is delegated to the json library.
        """
        try:
            data = json.loads(raw)
        except json.JSONDecodeError:
            self.fail("First argument is not valid JSON: %r" % raw)
        if isinstance(expected_data, str):
            try:
                expected_data = json.loads(expected_data)
            except json.JSONDecodeError:
                self.fail("Second argument is not valid JSON: %r" % expected_data)
        self.assertNotEqual(data, expected_data, msg=msg)
    def assertXMLEqual(self, xml1, xml2, msg=None):
        """
        Assert that two XML snippets are semantically the same.
        Whitespace in most cases is ignored and attribute ordering is not
        significant. The arguments must be valid XML.
        """
        try:
            result = compare_xml(xml1, xml2)
        except Exception as e:
            standardMsg = 'First or second argument is not valid XML\n%s' % e
            self.fail(self._formatMessage(msg, standardMsg))
        else:
            if not result:
                standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
                diff = ('\n' + '\n'.join(
                    difflib.ndiff(xml1.splitlines(), xml2.splitlines())
                ))
                standardMsg = self._truncateMessage(standardMsg, diff)
                self.fail(self._formatMessage(msg, standardMsg))
    def assertXMLNotEqual(self, xml1, xml2, msg=None):
        """
        Assert that two XML snippets are not semantically equivalent.
        Whitespace in most cases is ignored and attribute ordering is not
        significant. The arguments must be valid XML.
        """
        try:
            result = compare_xml(xml1, xml2)
        except Exception as e:
            standardMsg = 'First or second argument is not valid XML\n%s' % e
            self.fail(self._formatMessage(msg, standardMsg))
        else:
            if result:
                standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
                self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
    """TestCase with database access, isolated by flushing after each test."""
    # Subclasses can ask for resetting of auto increment sequence before each
    # test case
    reset_sequences = False
    # Subclasses can enable only a subset of apps for faster tests
    available_apps = None
    # Subclasses can define fixtures which will be automatically installed.
    fixtures = None
    databases = {DEFAULT_DB_ALIAS}
    _disallowed_database_msg = (
        'Database %(operation)s to %(alias)r are not allowed in this test. '
        'Add %(alias)r to %(test)s.databases to ensure proper test isolation '
        'and silence this failure.'
    )
    # If transactions aren't available, Django will serialize the database
    # contents into a fixture during setup and flush and reload them
    # during teardown (as flush does not restore data from migrations).
    # This can be slow; this flag allows enabling on a per-case basis.
    serialized_rollback = False
    def _pre_setup(self):
        """
        Perform pre-test setup:
        * If the class has an 'available_apps' attribute, restrict the app
          registry to these applications, then fire the post_migrate signal --
          it must run with the correct set of applications for the test case.
        * If the class has a 'fixtures' attribute, install those fixtures.
        """
        super()._pre_setup()
        if self.available_apps is not None:
            apps.set_available_apps(self.available_apps)
            setting_changed.send(
                sender=settings._wrapped.__class__,
                setting='INSTALLED_APPS',
                value=self.available_apps,
                enter=True,
            )
            for db_name in self._databases_names(include_mirrors=False):
                emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
        try:
            self._fixture_setup()
        except Exception:
            # Restore the full app registry before propagating the failure.
            if self.available_apps is not None:
                apps.unset_available_apps()
                setting_changed.send(
                    sender=settings._wrapped.__class__,
                    setting='INSTALLED_APPS',
                    value=settings.INSTALLED_APPS,
                    enter=False,
                )
            raise
        # Clear the queries_log so that it's less likely to overflow (a single
        # test probably won't execute 9K queries). If queries_log overflows,
        # then assertNumQueries() doesn't work.
        for db_name in self._databases_names(include_mirrors=False):
            connections[db_name].queries_log.clear()
    @classmethod
    def _databases_names(cls, include_mirrors=True):
        # Only consider allowed database aliases, including mirrors or not.
        return [
            alias for alias in connections
            if alias in cls.databases and (
                include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']
            )
        ]
    def _reset_sequences(self, db_name):
        """Reset auto-increment sequences on db_name, if supported."""
        conn = connections[db_name]
        if conn.features.supports_sequence_reset:
            sql_list = conn.ops.sequence_reset_by_name_sql(
                no_style(), conn.introspection.sequence_list())
            if sql_list:
                with transaction.atomic(using=db_name):
                    with conn.cursor() as cursor:
                        for sql in sql_list:
                            cursor.execute(sql)
    def _fixture_setup(self):
        """Reset sequences, restore serialized data, and load fixtures."""
        for db_name in self._databases_names(include_mirrors=False):
            # Reset sequences
            if self.reset_sequences:
                self._reset_sequences(db_name)
            # Provide replica initial data from migrated apps, if needed.
            if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
                if self.available_apps is not None:
                    apps.unset_available_apps()
                connections[db_name].creation.deserialize_db_from_string(
                    connections[db_name]._test_serialized_contents
                )
                if self.available_apps is not None:
                    apps.set_available_apps(self.available_apps)
            if self.fixtures:
                # We have to use this slightly awkward syntax due to the fact
                # that we're using *args and **kwargs together.
                call_command('loaddata', *self.fixtures,
                             **{'verbosity': 0, 'database': db_name})
    def _should_reload_connections(self):
        # Always reconnect after teardown; TestCase overrides this.
        return True
    def _post_teardown(self):
        """
        Perform post-test things:
        * Flush the contents of the database to leave a clean slate. If the
          class has an 'available_apps' attribute, don't fire post_migrate.
        * Force-close the connection so the next test gets a clean cursor.
        """
        try:
            self._fixture_teardown()
            super()._post_teardown()
            if self._should_reload_connections():
                # Some DB cursors include SQL statements as part of cursor
                # creation. If you have a test that does a rollback, the effect
                # of these statements is lost, which can affect the operation of
                # tests (e.g., losing a timezone setting causing objects to be
                # created with the wrong time). To make sure this doesn't
                # happen, get a clean connection at the start of every test.
                for conn in connections.all():
                    conn.close()
        finally:
            if self.available_apps is not None:
                apps.unset_available_apps()
                setting_changed.send(sender=settings._wrapped.__class__,
                                     setting='INSTALLED_APPS',
                                     value=settings.INSTALLED_APPS,
                                     enter=False)
    def _fixture_teardown(self):
        # Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
        # when flushing only a subset of the apps
        for db_name in self._databases_names(include_mirrors=False):
            # Flush the database
            inhibit_post_migrate = (
                self.available_apps is not None or
                (   # Inhibit the post_migrate signal when using serialized
                    # rollback to avoid trying to recreate the serialized data.
                    self.serialized_rollback and
                    hasattr(connections[db_name], '_test_serialized_contents')
                )
            )
            call_command('flush', verbosity=0, interactive=False,
                         database=db_name, reset_sequences=False,
                         allow_cascade=self.available_apps is not None,
                         inhibit_post_migrate=inhibit_post_migrate)
    def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
        """Assert that qs, mapped through transform, matches values."""
        items = map(transform, qs)
        if not ordered:
            return self.assertEqual(Counter(items), Counter(values), msg=msg)
        values = list(values)
        # For example qs.iterator() could be passed as qs, but it does not
        # have 'ordered' attribute.
        if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
            raise ValueError("Trying to compare non-ordered queryset "
                             "against more than one ordered values")
        return self.assertEqual(list(items), values, msg=msg)
    def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
        """Assert func(*args, **kwargs) runs num queries; usable as a
        context manager when func is None."""
        conn = connections[using]
        context = _AssertNumQueriesContext(self, num, conn)
        if func is None:
            return context
        with context:
            func(*args, **kwargs)
def connections_support_transactions(aliases=None):
    """
    Return whether or not all (or specified) connections support
    transactions.
    """
    if aliases is None:
        candidates = connections.all()
    else:
        candidates = (connections[alias] for alias in aliases)
    return all(candidate.features.supports_transactions for candidate in candidates)
class TestCase(TransactionTestCase):
    """
    Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
    test isolation.
    In most situations, TestCase should be preferred to TransactionTestCase as
    it allows faster execution. However, there are some situations where using
    TransactionTestCase might be necessary (e.g. testing some transactional
    behavior).
    On database backends with no transaction support, TestCase behaves as
    TransactionTestCase.
    """
    @classmethod
    def _enter_atomics(cls):
        """Open atomic blocks for multiple databases."""
        atomics = {}
        for db_name in cls._databases_names():
            atomics[db_name] = transaction.atomic(using=db_name)
            atomics[db_name].__enter__()
        return atomics
    @classmethod
    def _rollback_atomics(cls, atomics):
        """Rollback atomic blocks opened by the previous method."""
        for db_name in reversed(cls._databases_names()):
            transaction.set_rollback(True, using=db_name)
            atomics[db_name].__exit__(None, None, None)
    @classmethod
    def _databases_support_transactions(cls):
        # True when every allowed alias supports transactions.
        return connections_support_transactions(cls.databases)
    @classmethod
    def setUpClass(cls):
        """Open class-wide atomics, load fixtures, and run setUpTestData()."""
        super().setUpClass()
        if not cls._databases_support_transactions():
            return
        cls.cls_atomics = cls._enter_atomics()
        if cls.fixtures:
            for db_name in cls._databases_names(include_mirrors=False):
                try:
                    call_command('loaddata', *cls.fixtures, **{'verbosity': 0, 'database': db_name})
                except Exception:
                    # Roll back and unpatch connections before propagating.
                    cls._rollback_atomics(cls.cls_atomics)
                    cls._remove_databases_failures()
                    raise
        try:
            cls.setUpTestData()
        except Exception:
            cls._rollback_atomics(cls.cls_atomics)
            cls._remove_databases_failures()
            raise
    @classmethod
    def tearDownClass(cls):
        """Roll back the class-wide atomics and close connections."""
        if cls._databases_support_transactions():
            cls._rollback_atomics(cls.cls_atomics)
            for conn in connections.all():
                conn.close()
        super().tearDownClass()
    @classmethod
    def setUpTestData(cls):
        """Load initial data for the TestCase."""
        pass
    def _should_reload_connections(self):
        # With transactions, connections can be kept open between tests.
        if self._databases_support_transactions():
            return False
        return super()._should_reload_connections()
    def _fixture_setup(self):
        """Open per-test atomics, or fall back to flush-based isolation."""
        if not self._databases_support_transactions():
            # If the backend does not support transactions, we should reload
            # class data before each test
            self.setUpTestData()
            return super()._fixture_setup()
        assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
        self.atomics = self._enter_atomics()
    def _fixture_teardown(self):
        """Roll back the per-test atomics (after checking constraints)."""
        if not self._databases_support_transactions():
            return super()._fixture_teardown()
        try:
            for db_name in reversed(self._databases_names()):
                if self._should_check_constraints(connections[db_name]):
                    connections[db_name].check_constraints()
        finally:
            self._rollback_atomics(self.atomics)
    def _should_check_constraints(self, connection):
        # Only check when deferred checks are possible and the connection is
        # healthy (no pending rollback).
        return (
            connection.features.can_defer_constraint_checks and
            not connection.needs_rollback and connection.is_usable()
        )
class CheckCondition:
    """Descriptor class for deferred condition checking.

    Installed as ``__unittest_skip__`` on a test class; the (condition,
    reason) pairs are only evaluated when the attribute is first read.
    """

    def __init__(self, *conditions):
        self.conditions = conditions

    def add_condition(self, condition, reason):
        """Return a new descriptor with (condition, reason) appended."""
        return self.__class__(*self.conditions, (condition, reason))

    def __get__(self, instance, cls=None):
        # Trigger access for all bases.
        if any(getattr(parent, '__unittest_skip__', False) for parent in cls.__bases__):
            return True
        # Stop at the first condition that holds (lazy, in declaration order).
        triggered = next(
            (reason for condition, reason in self.conditions if condition()),
            None,
        )
        if triggered is None:
            return False
        # Override this descriptor's value and set the skip reason.
        cls.__unittest_skip__ = True
        cls.__unittest_skip_why__ = triggered
        return True
def _deferredSkip(condition, reason, name):
    """Return a decorator that skips a test when condition() is true.

    Unlike unittest.skipIf, the condition is evaluated lazily: at call
    time for function tests, and at ``__unittest_skip__`` access (via
    CheckCondition) for class tests.
    """
    def decorator(test_func):
        # `condition` may be rebound below for the class branch.
        nonlocal condition
        if not (isinstance(test_func, type) and
                issubclass(test_func, unittest.TestCase)):
            @wraps(test_func)
            def skip_wrapper(*args, **kwargs):
                # Reject use on a TestCase that doesn't declare the current
                # database alias in its `databases` set.
                if (args and isinstance(args[0], unittest.TestCase) and
                        connection.alias not in getattr(args[0], 'databases', {})):
                    raise ValueError(
                        "%s cannot be used on %s as %s doesn't allow queries "
                        "against the %r database." % (
                            name,
                            args[0],
                            args[0].__class__.__qualname__,
                            connection.alias,
                        )
                    )
                if condition():
                    raise unittest.SkipTest(reason)
                return test_func(*args, **kwargs)
            test_item = skip_wrapper
        else:
            # Assume a class is decorated
            test_item = test_func
            databases = getattr(test_item, 'databases', None)
            if not databases or connection.alias not in databases:
                # Defer raising to allow importing test class's module.
                def condition():
                    raise ValueError(
                        "%s cannot be used on %s as it doesn't allow queries "
                        "against the '%s' database." % (
                            name, test_item, connection.alias,
                        )
                    )
        # Retrieve the possibly existing value from the class's dict to
        # avoid triggering the descriptor.
        skip = test_func.__dict__.get('__unittest_skip__')
        if isinstance(skip, CheckCondition):
            # Stack this condition onto an existing deferred-skip descriptor.
            test_item.__unittest_skip__ = skip.add_condition(condition, reason)
        elif skip is not True:
            # Not already unconditionally skipped: install a fresh descriptor.
            test_item.__unittest_skip__ = CheckCondition((condition, reason))
        return test_item
    return decorator
def skipIfDBFeature(*features):
    """Skip a test if a database has at least one of the named features."""
    def _any_feature_present():
        return any(getattr(connection.features, feature, False)
                   for feature in features)
    return _deferredSkip(
        _any_feature_present,
        "Database has feature(s) %s" % ", ".join(features),
        'skipIfDBFeature',
    )
def skipUnlessDBFeature(*features):
    """Skip a test unless a database has all the named features."""
    def _some_feature_missing():
        return not all(getattr(connection.features, feature, False)
                       for feature in features)
    return _deferredSkip(
        _some_feature_missing,
        "Database doesn't support feature(s): %s" % ", ".join(features),
        'skipUnlessDBFeature',
    )
def skipUnlessAnyDBFeature(*features):
    """Skip a test unless a database has any of the named features."""
    def _all_features_missing():
        return not any(getattr(connection.features, feature, False)
                       for feature in features)
    return _deferredSkip(
        _all_features_missing,
        "Database doesn't support any of the feature(s): %s" % ", ".join(features),
        'skipUnlessAnyDBFeature',
    )
class QuietWSGIRequestHandler(WSGIRequestHandler):
    """
    A WSGIRequestHandler that doesn't log to standard output any of the
    requests received, so as to not clutter the test result output.
    """
    # *args absorbs self plus whatever the base handler passes; the
    # override silences BaseHTTPRequestHandler's stderr logging.
    def log_message(*args):
        pass
class FSFilesHandler(WSGIHandler):
    """
    WSGI middleware that intercepts calls to a directory, as defined by one of
    the *_ROOT settings, and serves those files, publishing them under *_URL.

    Subclasses supply get_base_dir() (filesystem root) and get_base_url()
    (URL prefix).
    """
    def __init__(self, application):
        # `application` is the wrapped WSGI app used for non-file requests.
        self.application = application
        self.base_url = urlparse(self.get_base_url())
        super().__init__()
    def _should_handle(self, path):
        """
        Check if the path should be handled. Ignore the path if:
        * the host is provided as part of the base_url
        * the request's path isn't under the media path (or equal)
        """
        # base_url[2] is the URL path component; base_url[1] is the netloc.
        return path.startswith(self.base_url[2]) and not self.base_url[1]
    def file_path(self, url):
        """Return the relative path to the file on disk for the given URL."""
        relative_url = url[len(self.base_url[2]):]
        return url2pathname(relative_url)
    def get_response(self, request):
        # Imported here to avoid import-time dependency cycles.
        from django.http import Http404
        if self._should_handle(request.path):
            try:
                return self.serve(request)
            except Http404:
                # Fall through to the wrapped application on missing files.
                pass
        return super().get_response(request)
    def serve(self, request):
        """Serve the request path as a static file from the base dir."""
        os_rel_path = self.file_path(request.path)
        os_rel_path = posixpath.normpath(unquote(os_rel_path))
        # Emulate behavior of django.contrib.staticfiles.views.serve() when it
        # invokes staticfiles' finders functionality.
        # TODO: Modify if/when that internal API is refactored
        final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
        return serve(request, final_rel_path, document_root=self.get_base_dir())
    def __call__(self, environ, start_response):
        if not self._should_handle(get_path_info(environ)):
            # Not ours: delegate to the wrapped application.
            return self.application(environ, start_response)
        return super().__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
    """
    Handler for serving static files. A private class that is meant to be used
    solely as a convenience by LiveServerThread.
    """
    def get_base_dir(self):
        # Filesystem root for static files.
        return settings.STATIC_ROOT
    def get_base_url(self):
        # URL prefix under which static files are published.
        return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
    """
    Handler for serving the media files. A private class that is meant to be
    used solely as a convenience by LiveServerThread.
    """
    def get_base_dir(self):
        # Filesystem root for uploaded media.
        return settings.MEDIA_ROOT
    def get_base_url(self):
        # URL prefix under which media files are published.
        return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
    """Thread for running a live http server while the tests are running."""
    def __init__(self, host, static_handler, connections_override=None, port=0):
        self.host = host
        # port == 0 asks the OS for a free port; the real port is filled in
        # by run() once the server is bound.
        self.port = port
        # Signalled once the server is listening *or* startup failed; the
        # main thread waits on it and then inspects self.error.
        self.is_ready = threading.Event()
        self.error = None
        self.static_handler = static_handler
        self.connections_override = connections_override
        super().__init__()
    def run(self):
        """
        Set up the live server and databases, and then loop over handling
        HTTP requests.
        """
        if self.connections_override:
            # Override this thread's database connections with the ones
            # provided by the main thread.
            for alias, conn in self.connections_override.items():
                connections[alias] = conn
        try:
            # Create the handler for serving static and media files
            handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
            self.httpd = self._create_server()
            # If binding to port zero, assign the port allocated by the OS.
            if self.port == 0:
                self.port = self.httpd.server_address[1]
            self.httpd.set_app(handler)
            self.is_ready.set()
            self.httpd.serve_forever()
        except Exception as e:
            # Record the failure and unblock the waiting main thread.
            self.error = e
            self.is_ready.set()
        finally:
            connections.close_all()
    def _create_server(self):
        # allow_reuse_address=False so a stale socket surfaces as an error
        # instead of silently rebinding.
        return ThreadedWSGIServer((self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False)
    def terminate(self):
        """Stop the server (if it started) and join this thread."""
        if hasattr(self, 'httpd'):
            # Stop the WSGI server
            self.httpd.shutdown()
            self.httpd.server_close()
        self.join()
class LiveServerTestCase(TransactionTestCase):
    """
    Do basically the same as TransactionTestCase but also launch a live HTTP
    server in a separate thread so that the tests may use another testing
    framework, such as Selenium for example, instead of the built-in dummy
    client.
    It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless if using in-memory sqlite)
    and each thread needs to commit all their transactions so that the other
    thread can see the changes.
    """
    host = 'localhost'
    # 0 = let the OS pick a free port (see LiveServerThread).
    port = 0
    server_thread_class = LiveServerThread
    static_handler = _StaticFilesHandler
    @classproperty
    def live_server_url(cls):
        # Base URL of the running live server for use in tests.
        return 'http://%s:%s' % (cls.host, cls.server_thread.port)
    @classproperty
    def allowed_host(cls):
        return cls.host
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        connections_override = {}
        for conn in connections.all():
            # If using in-memory sqlite databases, pass the connections to
            # the server thread.
            if conn.vendor == 'sqlite' and conn.is_in_memory_db():
                # Explicitly enable thread-shareability for this connection
                conn.inc_thread_sharing()
                connections_override[conn.alias] = conn
        cls._live_server_modified_settings = modify_settings(
            ALLOWED_HOSTS={'append': cls.allowed_host},
        )
        cls._live_server_modified_settings.enable()
        cls.server_thread = cls._create_server_thread(connections_override)
        # Daemonize so a hung server can't keep the test process alive.
        cls.server_thread.daemon = True
        cls.server_thread.start()
        # Wait for the live server to be ready
        cls.server_thread.is_ready.wait()
        if cls.server_thread.error:
            # Clean up behind ourselves, since tearDownClass won't get called in
            # case of errors.
            cls._tearDownClassInternal()
            raise cls.server_thread.error
    @classmethod
    def _create_server_thread(cls, connections_override):
        return cls.server_thread_class(
            cls.host,
            cls.static_handler,
            connections_override=connections_override,
            port=cls.port,
        )
    @classmethod
    def _tearDownClassInternal(cls):
        # There may not be a 'server_thread' attribute if setUpClass() for some
        # reasons has raised an exception.
        if hasattr(cls, 'server_thread'):
            # Terminate the live server's thread
            cls.server_thread.terminate()
            # Restore sqlite in-memory database connections' non-shareability.
            for conn in cls.server_thread.connections_override.values():
                conn.dec_thread_sharing()
    @classmethod
    def tearDownClass(cls):
        cls._tearDownClassInternal()
        cls._live_server_modified_settings.disable()
        super().tearDownClass()
class SerializeMixin:
    """
    Enforce serialization of TestCases that share a common resource.
    Define a common 'lockfile' for each set of TestCases to serialize. This
    file must exist on the filesystem.
    Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
    """
    lockfile = None
    @classmethod
    def setUpClass(cls):
        if cls.lockfile is None:
            raise ValueError(
                "{}.lockfile isn't set. Set it to a unique value "
                "in the base class.".format(cls.__name__))
        # Hold an exclusive OS-level lock on the shared file for the whole
        # class run; other processes/classes using the same lockfile block
        # here until tearDownClass() releases it by closing the handle.
        cls._lockfile = open(cls.lockfile)
        locks.lock(cls._lockfile, locks.LOCK_EX)
        super().setUpClass()
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Closing the file releases the lock.
        cls._lockfile.close()
| 40.413058
| 115
| 0.604105
|
85690fa8450025a8f7489b0ea456ab35da84c04c
| 2,738
|
py
|
Python
|
Exporters/Blender/src/babylon-js/exporter_settings_panel.py
|
harunpehlivan/Babylon.js
|
57275f4acf02e3d5a908672dcbda0922c7307f18
|
[
"Apache-2.0"
] | 1
|
2019-04-08T07:44:09.000Z
|
2019-04-08T07:44:09.000Z
|
Exporters/Blender/src/babylon-js/exporter_settings_panel.py
|
harunpehlivan/Babylon.js
|
57275f4acf02e3d5a908672dcbda0922c7307f18
|
[
"Apache-2.0"
] | null | null | null |
Exporters/Blender/src/babylon-js/exporter_settings_panel.py
|
harunpehlivan/Babylon.js
|
57275f4acf02e3d5a908672dcbda0922c7307f18
|
[
"Apache-2.0"
] | 1
|
2019-04-08T07:49:27.000Z
|
2019-04-08T07:49:27.000Z
|
from .package_level import *
import bpy
# Panel displayed in Scene Tab of properties, so settings can be saved in a .blend file
class ExporterSettingsPanel(bpy.types.Panel):
    """Scene-tab properties panel holding Babylon.js exporter settings.

    NOTE: the bpy.types.Scene.* assignments below execute at class-body
    time, registering the properties globally on Scene as a side effect of
    importing this module.
    """
    bl_label = get_title()
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'scene'
    bpy.types.Scene.export_onlySelectedLayer = bpy.props.BoolProperty(
        name='Export only selected layers',
        description='Export only selected layers',
        default = False,
        )
    bpy.types.Scene.export_flatshadeScene = bpy.props.BoolProperty(
        name='Flat shade entire scene',
        description='Use face normals on all meshes.  Increases vertices.',
        default = False,
        )
    bpy.types.Scene.force64Kmeshes = bpy.props.BoolProperty(
        name='Force 64k per Mesh Vertex Limit',
        description='When true, break up meshes with > 64k vertices for older\nhardware.  No effect when no qualifying meshes.',
        default = True,
        )
    bpy.types.Scene.attachedSound = bpy.props.StringProperty(
        name='Sound',
        description='',
        default = ''
        )
    bpy.types.Scene.loopSound = bpy.props.BoolProperty(
        name='Loop sound',
        description='',
        default = True
        )
    bpy.types.Scene.autoPlaySound = bpy.props.BoolProperty(
        name='Auto play sound',
        description='',
        default = True
        )
    bpy.types.Scene.inlineTextures = bpy.props.BoolProperty(
        name='inline',
        description='turn textures into encoded strings, for direct inclusion into source code',
        default = False
        )
    bpy.types.Scene.textureDir = bpy.props.StringProperty(
        name='Sub-directory',
        description='The path below the output directory to write texture files (any separators OS dependent)',
        default = ''
        )
    bpy.types.Scene.ignoreIKBones = bpy.props.BoolProperty(
        name='Ignore IK Bones',
        description="Do not export bones with either '.ik' or 'ik.'(not case sensitive) in the name",
        default = False,
        )
    def draw(self, context):
        """Lay out the exporter settings UI for the Scene properties tab."""
        layout = self.layout
        scene = context.scene
        layout.prop(scene, 'export_onlySelectedLayer')
        layout.prop(scene, 'export_flatshadeScene')
        layout.prop(scene, 'force64Kmeshes')
        layout.prop(scene, 'ignoreIKBones')
        # Texture-location box: sub-directory only applies when textures
        # are not inlined.
        box = layout.box()
        box.label(text='Texture Location:')
        box.prop(scene, 'inlineTextures')
        row = box.row()
        row.enabled = not scene.inlineTextures
        row.prop(scene, 'textureDir')
        # Sound settings box.
        box = layout.box()
        box.prop(scene, 'attachedSound')
        box.prop(scene, 'autoPlaySound')
        box.prop(scene, 'loopSound')
| 36.026316
| 128
| 0.635135
|
d273ecebddb6d0970334840a297d5346b4190ec1
| 186
|
py
|
Python
|
LEVEL1/제일작은수제거하기/solution.py
|
seunghwanly/CODING-TEST
|
a820da950c163d399594770199aa2e782d1fbbde
|
[
"MIT"
] | null | null | null |
LEVEL1/제일작은수제거하기/solution.py
|
seunghwanly/CODING-TEST
|
a820da950c163d399594770199aa2e782d1fbbde
|
[
"MIT"
] | null | null | null |
LEVEL1/제일작은수제거하기/solution.py
|
seunghwanly/CODING-TEST
|
a820da950c163d399594770199aa2e782d1fbbde
|
[
"MIT"
] | null | null | null |
def solution(arr):
    """Remove the first occurrence of the smallest element from arr.

    Mutates and returns arr; returns [-1] when arr has fewer than 2
    elements (matching the problem's 'result would be empty' rule).
    """
    if len(arr) <= 1:
        return [-1]
    # list.remove drops the first occurrence of the minimum, the same
    # element that del arr[arr.index(min(arr))] would delete.
    arr.remove(min(arr))
    return arr


print(solution([4, 3, 2, 1]))
print(solution([4]))
| 20.666667
| 32
| 0.548387
|
f1fd337a65bb09ced55edf055a2ba205b2a75705
| 3,905
|
py
|
Python
|
snoop/content_types.py
|
hoover/snoop
|
bd49b081418a8a01a1e469ab17759a4c5b20d850
|
[
"MIT"
] | 5
|
2017-01-03T00:52:03.000Z
|
2019-10-27T03:32:35.000Z
|
snoop/content_types.py
|
hoover/snoop
|
bd49b081418a8a01a1e469ab17759a4c5b20d850
|
[
"MIT"
] | 25
|
2016-08-21T11:26:44.000Z
|
2018-03-13T12:19:20.000Z
|
snoop/content_types.py
|
hoover/snoop
|
bd49b081418a8a01a1e469ab17759a4c5b20d850
|
[
"MIT"
] | 6
|
2016-09-27T13:03:45.000Z
|
2019-10-27T03:32:30.000Z
|
import mimetypes
import magic
# Register extensions the stdlib mimetypes database doesn't know about:
# Apple Mail parts, Outlook messages/archives, and PGP payloads.
mimetypes.add_type('message/x-emlx', '.emlx')
mimetypes.add_type('message/x-emlxpart', '.emlxpart')
mimetypes.add_type('application/vnd.ms-outlook', '.msg')
mimetypes.add_type('application/x-hoover-pst', '.pst')
mimetypes.add_type('application/x-hoover-pst', '.ost')
mimetypes.add_type('application/x-pgp-encrypted-ascii', '.asc')
mimetypes.add_type('application/x-pgp-encrypted-binary', '.pgp')
def guess_content_type(filename):
    """Best-effort MIME type from the filename's extension ('' if unknown)."""
    content_type, _encoding = mimetypes.guess_type(filename, strict=False)
    return content_type or ''
# Map MIME type -> coarse filetype label used by guess_filetype().
FILE_TYPES = {
    'application/x-directory': 'folder',
    'application/pdf': 'pdf',
    'text/plain': 'text',
    'text/html': 'html',
    'application/x-hush-pgp-encrypted-html-body': 'html',
    # Email messages and mailbox archives.
    'message/x-emlx': 'email',
    'message/rfc822': 'email',
    'application/vnd.ms-outlook': 'email',
    'application/x-hoover-pst': 'email-archive',
    # Word-processing documents (MS Office, OOXML, ODF, RTF).
    'application/msword': 'doc',
    'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'doc',
    'application/vnd.openxmlformats-officedocument.wordprocessingml.template': 'doc',
    'application/vnd.ms-word.document.macroEnabled.12': 'doc',
    'application/vnd.ms-word.template.macroEnabled.12': 'doc',
    'application/vnd.oasis.opendocument.text': 'doc',
    'application/vnd.oasis.opendocument.text-template': 'doc',
    'application/rtf': 'doc',
    # Spreadsheets.
    'application/vnd.ms-excel': 'xls',
    'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': 'xls',
    'application/vnd.openxmlformats-officedocument.spreadsheetml.template': 'xls',
    'application/vnd.ms-excel.sheet.macroEnabled.12': 'xls',
    'application/vnd.ms-excel.template.macroEnabled.12': 'xls',
    'application/vnd.ms-excel.addin.macroEnabled.12': 'xls',
    'application/vnd.ms-excel.sheet.binary.macroEnabled.12': 'xls',
    'application/vnd.oasis.opendocument.spreadsheet-template': 'xls',
    'application/vnd.oasis.opendocument.spreadsheet': 'xls',
    # Presentations.
    'application/vnd.openxmlformats-officedocument.presentationml.presentation': 'ppt',
    'application/vnd.openxmlformats-officedocument.presentationml.template': 'ppt',
    'application/vnd.openxmlformats-officedocument.presentationml.slideshow': 'ppt',
    'application/vnd.ms-powerpoint': 'ppt',
    'application/vnd.ms-powerpoint.addin.macroEnabled.12': 'ppt',
    'application/vnd.ms-powerpoint.presentation.macroEnabled.12': 'ppt',
    'application/vnd.ms-powerpoint.template.macroEnabled.12': 'ppt',
    'application/vnd.ms-powerpoint.slideshow.macroEnabled.12': 'ppt',
    'application/vnd.oasis.opendocument.presentation': 'ppt',
    'application/vnd.oasis.opendocument.presentation-template': 'ppt',
    # Compressed archives.
    'application/zip': 'archive',
    'application/rar': 'archive',
    'application/x-7z-compressed': 'archive',
    'application/x-tar': 'archive',
    'application/x-bzip2': 'archive',
    'application/x-zip': 'archive',
    'application/x-gzip': 'archive',
    'application/x-zip-compressed': 'archive',
    'application/x-rar-compressed': 'archive',
}
# Fallbacks keyed on libmagic's *textual* description, for formats whose
# MIME sniff is ambiguous (PST folders, OLE2 containers used by Outlook).
MAGIC_DESCRIPTION_TYPES = {
    "Microsoft Outlook email folder (>=2003)": "application/x-hoover-pst",
    "Composite Document File V2 Document": "application/vnd.ms-outlook",
}
# Cap on bytes fed to libmagic per file (24 MiB).
MAGIC_READ_LIMIT = 24 * 1024 * 1024
def libmagic_guess_content_type(file, filesize):
    """Sniff a content type with libmagic, reading at most MAGIC_READ_LIMIT bytes."""
    head = file.read(min(MAGIC_READ_LIMIT, filesize))
    mime_type = magic.from_buffer(head, mime=True)
    if mime_type in FILE_TYPES:
        return mime_type
    # Unknown MIME: fall back on the textual description table before
    # settling for whatever libmagic reported (or '').
    description = magic.from_buffer(head, mime=False)
    return MAGIC_DESCRIPTION_TYPES.get(description, mime_type or '')
def guess_filetype(doc):
    """Map doc.content_type to a coarse filetype label, or None if unknown."""
    # Strip any parameters, e.g. 'text/plain; charset=utf-8' -> 'text/plain'.
    mime = doc.content_type.split(';')[0]
    known = FILE_TYPES.get(mime)
    if known is not None:
        return known
    major = mime.split('/')[0]
    if major in ('audio', 'video', 'image'):
        return major
    return None
| 41.542553
| 87
| 0.714725
|
2f840aca672bc20d880ebd6129769eebdc3f89f5
| 903
|
py
|
Python
|
ctrl/cmorph/old/cmorph_regrid.py
|
markmuetz/cosmic
|
f215c499bfc8f1d717dea6aa78a58632a4e89113
|
[
"Apache-2.0"
] | null | null | null |
ctrl/cmorph/old/cmorph_regrid.py
|
markmuetz/cosmic
|
f215c499bfc8f1d717dea6aa78a58632a4e89113
|
[
"Apache-2.0"
] | null | null | null |
ctrl/cmorph/old/cmorph_regrid.py
|
markmuetz/cosmic
|
f215c499bfc8f1d717dea6aa78a58632a4e89113
|
[
"Apache-2.0"
] | 1
|
2021-01-26T02:25:48.000Z
|
2021-01-26T02:25:48.000Z
|
import sys
from pathlib import Path
import iris
from cosmic.util import load_module, filepath_regrid
def main(year, month):
    """Regrid one month of CMORPH 8km-30min precip onto the target grid.

    :param year: year of the CMORPH input file
    :param month: month of the CMORPH input file (1-12)
    """
    # NOTE(review): target grid file is hard-coded to 2005-01 output;
    # presumably the model grid is identical for every month -- confirm.
    target_filepath = Path(f'/gws/nopw/j04/cosmic/mmuetz/data/u-al508/ap9.pp/precip_200501/al508a.p9200501.asia_precip.nc')
    cmorph_datadir = Path(f'/gws/nopw/j04/cosmic/mmuetz/data/cmorph_data')
    cmorph_filepath = cmorph_datadir / f'8km-30min/precip_{year}{month:02}/cmorph_ppt_{year}{month:02}.asia.nc'
    # Output sits next to the input, with an .N1280 marker before .nc.
    output_filepath = cmorph_filepath.parent / (cmorph_filepath.stem + '.N1280.nc')
    print(f'Regrid {cmorph_filepath} -> {output_filepath}')
    print(f' using {target_filepath} resolution')
    coarse_cube = filepath_regrid(cmorph_filepath, target_filepath)
    # zlib=True writes a compressed NetCDF file.
    iris.save(coarse_cube, str(output_filepath), zlib=True)
if __name__ == '__main__':
    # Usage: script <config module path> <config key>; the config module
    # supplies SCRIPT_ARGS mapping keys to (year, month) tuples.
    config = load_module(sys.argv[1])
    config_key = sys.argv[2]
    main(*config.SCRIPT_ARGS[config_key])
| 33.444444
| 123
| 0.744186
|
74aa3d3bb96a81c9140e5a38d934c9e9cfee3e46
| 226
|
py
|
Python
|
Dict comp/aula67.py
|
pinheirogus/Curso-Python-Udemy
|
d6d52320426172e924081b9df619490baa8c6016
|
[
"MIT"
] | 1
|
2021-09-01T01:58:13.000Z
|
2021-09-01T01:58:13.000Z
|
Dict comp/aula67.py
|
pinheirogus/Curso-Python-Udemy
|
d6d52320426172e924081b9df619490baa8c6016
|
[
"MIT"
] | null | null | null |
Dict comp/aula67.py
|
pinheirogus/Curso-Python-Udemy
|
d6d52320426172e924081b9df619490baa8c6016
|
[
"MIT"
] | null | null | null |
# Key/value pairs kept for experimenting with dict comprehensions.
lista = [
    ('chave', 'valor'),
    ('chave2', 'valor2'),
    ('chave3', 3),
]

# Build {'chave_0': 0, 'chave_1': 1, ...}: each key embeds the index,
# each value is the index squared.
d1 = {f'chave_{x}': x ** 2 for x in range(5)}
print(d1)
| 16.142857
| 50
| 0.482301
|
ae7a4dfbe07b2f2204d8e33df76264e85d3847fe
| 419
|
py
|
Python
|
sudoku/src/main.py
|
klesnkri/sudoku-solver-aco
|
10170bf2a66877811fe78a65d45cb4d752c0e6f2
|
[
"MIT"
] | 2
|
2021-05-14T14:51:37.000Z
|
2022-02-19T16:59:41.000Z
|
sudoku/src/main.py
|
christee8/sudoku-solver-aco
|
10170bf2a66877811fe78a65d45cb4d752c0e6f2
|
[
"MIT"
] | null | null | null |
sudoku/src/main.py
|
christee8/sudoku-solver-aco
|
10170bf2a66877811fe78a65d45cb4d752c0e6f2
|
[
"MIT"
] | 1
|
2021-06-06T20:32:15.000Z
|
2021-06-06T20:32:15.000Z
|
import sys
from App import App
def main():
    """Validate CLI arguments and launch the sudoku app (GUI or console)."""
    # Command line arguments
    if len(sys.argv) != 3 or sys.argv[2] not in ("gui", "console"):
        print("usage: " + sys.argv[0] + " <input sudoku filename> <gui/console>")
        return
    sudoku_file = sys.argv[1]
    gui_active = sys.argv[2] == "gui"
    App(gui_active, sudoku_file).run()
if __name__ == "__main__":
main()
| 19.952381
| 81
| 0.582339
|
0ec5dfb1b134a65644022f2513b464c8072c835b
| 253
|
py
|
Python
|
manage.py
|
klebercode/protocolle
|
31b464e41e20b77d3bce2a4688b0f4f7dff2c3af
|
[
"MIT"
] | null | null | null |
manage.py
|
klebercode/protocolle
|
31b464e41e20b77d3bce2a4688b0f4f7dff2c3af
|
[
"MIT"
] | null | null | null |
manage.py
|
klebercode/protocolle
|
31b464e41e20b77d3bce2a4688b0f4f7dff2c3af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "protocolle.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23
| 74
| 0.774704
|
2071dd49fd312292976a8179dd06fcde8a122b3d
| 10,932
|
py
|
Python
|
ZabbixAlertYTWorkflow.py
|
devopshq/zabbix-youtrack-action
|
e4d8354e059687cc0ae13b1c5c25bf70de03c31f
|
[
"MIT"
] | 5
|
2017-05-20T21:12:47.000Z
|
2021-03-26T14:53:09.000Z
|
ZabbixAlertYTWorkflow.py
|
devopshq/zabbix-youtrack-action
|
e4d8354e059687cc0ae13b1c5c25bf70de03c31f
|
[
"MIT"
] | null | null | null |
ZabbixAlertYTWorkflow.py
|
devopshq/zabbix-youtrack-action
|
e4d8354e059687cc0ae13b1c5c25bf70de03c31f
|
[
"MIT"
] | 3
|
2016-07-30T11:38:00.000Z
|
2019-02-28T10:39:28.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) DevOpsHQ, 2016
# Integration YouTrack and Zabbix alerts.
import logging
import re
import sys
import time
import urllib
# Explicit submodule import: `import urllib` alone does not guarantee
# urllib.parse is available; Main() calls urllib.parse.quote().
import urllib.parse

import yaml
from pyzabbix import ZabbixAPI
from requests.packages.urllib3 import disable_warnings
from youtrack.connection import Connection
disable_warnings() # disable ssl certificate errors
# ------------ START Constants ------------
# NOTE(review): sys.argv[4]/[5] are read at import time, so importing this
# module without the full CLI argument list raises IndexError.
YT_PROJECT_NAME = 'CM'  # ID project in Youtrack
YT_ASSIGNEE = 'Zabbix'  # Assignee to after create issue
YT_TYPE = 'Error'  # Youtrack Issue type
YT_SERVICE = 'Zabbix'  # Youtrack Issue service
YT_SUBSYSTEM = 'DevOps'  # Youtrack Issue subsystem
YT_USER = 'Zabbix'  # Youtrack Issue create user
YT_PASSWORD = sys.argv[4]  # Youtrack user password
YT_TIME = 'About 1 hour'  # Estimated time
# YT_TIME = 'Undefined' # Estimated time
YT_COMMENT = "Now is {status}. \n{text}\n\n"  # Add this comment in issue
LOG_FILE_NAME = '/var/log/zabbix/PtZabbixAlertYTWorkflow.log'  # Path to Log-file for debug
# LOG_FILE_NAME = 'PtZabbixAlertYTWorkflow.log'  # Uncomment for debug in Windows-OS
ZABBIX_SERVER = "https://zabbix.example.com/zabbix"
ZBX_USER = "zabbix_api"
ZBX_PASSWORD = sys.argv[5]
# ------------ END Constants ------------
# ------------ START Setup logging ------------
# Use logger to log information
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# Log to file
fh = logging.FileHandler(LOG_FILE_NAME)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
# Log to stdout
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)  # Use logger to log information
# Route pyzabbix's own logging through the same handlers.
log = logging.getLogger('pyzabbix')
log.addHandler(ch)
log.addHandler(fh)
log.setLevel(logging.DEBUG)
# ------------ END Setup logging ------------
# ------------ START ZabbixAPI block ------------
# Login happens at import time; any connection failure aborts the script.
Zbx = ZabbixAPI(ZABBIX_SERVER)
# NOTE(review): TLS certificate verification is disabled here -- confirm
# this is intentional for the internal Zabbix endpoint.
Zbx.session.verify = False
Zbx.login(ZBX_USER, ZBX_PASSWORD)
# ------------ END ZabbixAPI block ------------
# ------------ START Function declaration ------------
def ExecAndLog(connection, issueId, command="", comment=""):
    """Debug-log a YouTrack command, then execute it on the given issue."""
    message = "Run command in {issueId}: {command}. {comment}".format(
        issueId=issueId,
        command=command,
        comment=comment,
    )
    logger.debug(message)
    connection.executeCommand(issueId=issueId, command=command, comment=comment)
# ------------ END Function declaration ------------
def Main(sendTo, subject, yamlMessage):
    """
    Workflow Zabbix-YouTrack: create or update the YouTrack issue matching
    the Zabbix trigger described in yamlMessage, then acknowledge the
    Zabbix event with a link to that issue.

    :param sendTo: URL to Youtrack (ex. https://youtrack.example.com)
    :param subject: subject from Zabbix Action (currently unused)
    :param yamlMessage: YAML message from Zabbix Action; expected keys:
                        Name, Text, Hostname, Status, Severity, EventID,
                        TriggerID
    """
    # ----- Use below example yamlMessage to debug -----
    # yamlMessage = """Name: 'Test Zabbix-YT workflow, ignore it'
    # Text: 'Agent ping (server:agent.ping()): DOWN (1) '
    # Hostname: 'server.exmpale.ru'
    # Status: "OK"
    # Severity: "High"
    # EventID: "96976"
    # TriggerID: "123456789012" """
    # safe_load() instead of load(): the message arrives from outside and
    # plain yaml.load() can construct arbitrary Python objects (and needs
    # an explicit Loader on PyYAML >= 6).
    messages = yaml.safe_load(yamlMessage)
    # ----- START Issue parameters -----
    # Correspondence between the YouTrackPriority and ZabbixSeverity:
    #   Critical >= High
    #   Normal   <  High
    ytPriority = 'Normal'
    if messages['Severity'] in ('Disaster', 'High'):
        ytPriority = 'Critical'
    # The TriggerID embedded in the summary is the key used to find the
    # issue again on subsequent events.
    ytName = "{} ZabbixTriggerID::{}".format(messages['Name'], messages['TriggerID'])
    # ----- END Issue parameters -----
    # ----- START Youtrack Issue description -----
    # Search link listing every issue for the same host.
    searchString = "Hostname: '{}'".format(messages['Hostname'])
    linkToHostIssue = "{youtrack}/issues/{projectname}?q={query}".format(
        youtrack=sendTo,
        projectname=YT_PROJECT_NAME,
        query=urllib.parse.quote(searchString, safe='')
    )
    issueDescription = """
{ytName}
-----
{yamlMessage}
-----
- [https://zabbix.example.com/zabbix.php?action=dashboard.view Zabbix Dashboard]
- Show [{linkToHostIssue} all issue for *this host*]
""".format(
        ytName=ytName,
        yamlMessage=yamlMessage,
        linkToHostIssue=linkToHostIssue, )
    # ----- END Youtrack Issue description -----
    # ----- START Youtrack current week -----
    # Create connect to Youtrack API
    connection = Connection(sendTo, YT_USER, YT_PASSWORD)
    # Get current week in YT format (Sprint planned): first version bundle
    # value that is neither archived nor released.
    # NOTE(review): fixVersionWeek stays unbound if every version is
    # archived/released; the PROBLEM branch would then raise NameError.
    version = connection.getAllBundles('version')
    for fixVersion in version[0].values:
        if not fixVersion['archived'] and not fixVersion['released']:
            fixVersionWeek = fixVersion['name']
            break
    # ----- END Youtrack current week -----
    # ----- START Youtrack get or create issue -----
    # Look the issue up by its TriggerID marker.
    createNewIssue = False
    logger.debug("Get issue with text '{}'".format(messages['TriggerID']))
    issue = connection.getIssues(YT_PROJECT_NAME,
                                 "ZabbixTriggerID::{}".format(messages['TriggerID']),
                                 0,
                                 1)
    if len(issue) == 0:
        createNewIssue = True
    else:
        # If the summary carries the TriggerID marker it is our issue;
        # otherwise the search matched something else, so create a new one.
        if "ZabbixTriggerID::{}".format(messages['TriggerID']) in issue[0]['summary']:
            issueId = issue[0]['id']
            issue = connection.getIssue(issueId)
        else:
            createNewIssue = True
    # Create new issue
    if createNewIssue:
        logger.debug("Create new issue because it is not exist")
        issue = connection.createIssue(YT_PROJECT_NAME,
                                       'Unassigned',
                                       ytName,
                                       issueDescription,
                                       priority=ytPriority,
                                       subsystem=YT_SUBSYSTEM,
                                       type=YT_TYPE,
                                       )
        # Give the server a moment to index the freshly created issue.
        time.sleep(3)
        # Parse the new issue ID (e.g. CM-123) out of its location URL.
        result = re.search(r'(CM-\d*)', issue[0]['location'])
        issueId = result.group(0)
        issue = connection.getIssue(issueId)
    logger.debug("Issue have id={}".format(issueId))
    # Set issue service
    ExecAndLog(connection, issueId, "Service {}".format(YT_SERVICE))
    # Update priority
    ExecAndLog(connection, issueId, "Priority {}".format(ytPriority))
    # ----- END Youtrack get or create issue -----
    # ----- START PROBLEM block ------
    if messages['Status'] == "PROBLEM":
        # Issue exists and is NOT "Hold on": refresh estimate and sprint.
        if issue['State'] != 'Hold on':
            # Estimated time
            ExecAndLog(connection, issueId, "Estimated time {}".format(YT_TIME))
            # Update fix version
            ExecAndLog(connection=connection, issueId=issueId, command="Sprint planned {}".format(fixVersionWeek))
            # Reopen if Fixed or Verified or Canceled
            if issue['State'] in ('Fixed', 'Verified', 'Canceled'):
                # Reopen Issue
                ExecAndLog(connection, issueId, "State reopen")
                # Assignee issue
                ExecAndLog(connection, issueId, command="Assignee Unassigned")
        # Update summary and description for issue
        logger.debug("Run command in {issueId}: {command}".format(
            issueId=issueId,
            command="""Update summary and description with connection.updateIssue method"""))
        connection.updateIssue(issueId=issueId, summary=ytName, description=issueDescription)
        # Add comment
        logger.debug("Run command in {issueId}: {command}".format(
            issueId=issueId,
            command="""Now is PROBLEM {}""".format(messages['Text'])))
        connection.executeCommand(issueId=issueId,
                                  command="",
                                  comment=YT_COMMENT.format(
                                      status=messages['Status'],
                                      text=messages['Text'])
                                  )
        # Acknowledge the Zabbix event with the YouTrack issue link.
        logger.debug("ZABBIX-API: Send Youtrack ID to {}".format(messages['EventID']))
        Zbx.event.acknowledge(eventids=messages['EventID'], message="Create Youtrack task")
        Zbx.event.acknowledge(eventids=messages['EventID'],
                              message="https://youtrack.example.com/issue/{}".format(issueId))
    # ----- End PROBLEM block ------
    # ----- Start OK block -----
    if messages['Status'] == "OK":
        if issue['State'] in ('Hold on', 'Registered'):
            # Cancel if not in work
            ExecAndLog(connection, issueId, command="State Cancel")
            # Assignee issue
            ExecAndLog(connection, issueId, command="Assignee {}".format(YT_ASSIGNEE))
        if issue['State'] == 'Fixed':
            # Verify if Fixed
            ExecAndLog(connection, issueId, command="State verify")
        logger.debug("Run command in {issueId}: {command}".format(
            issueId=issueId,
            command="""Now is OK {}""".format(messages['Text'])))
        connection.executeCommand(issueId=issueId,
                                  command="",
                                  comment=YT_COMMENT.format(
                                      status=messages['Status'],
                                      text=messages['Text'])
                                  )
    # ----- End OK block -----
if __name__ == "__main__":
logger.debug("Start script with arguments: {}".format(sys.argv[1:]))
try:
Main(
# Arguments WIKI: https://www.zabbix.com/documentation/3.0/ru/manual/config/notifications/media/script
sys.argv[1], # to
sys.argv[2], # subject
sys.argv[3], # body
# FYI: Next argument used in code:
# sys.argv[4], # YT_PASSWORD
# sys.argv[5], # ZBX_PASSWORD
)
except Exception:
logger.exception("Exit with error") # Output exception
exit(1)
| 36.198675
| 145
| 0.554885
|
f614c47b3a151d4e70d4c3cda4b3c4265d64f7cd
| 5,186
|
py
|
Python
|
ironic_python_agent/openstack/common/ironic_lib/utils.py
|
faizan-barmawer/ironic-python-agent
|
589b198e960a69fc9d71825fdf204cbbd59f39dd
|
[
"Apache-2.0"
] | null | null | null |
ironic_python_agent/openstack/common/ironic_lib/utils.py
|
faizan-barmawer/ironic-python-agent
|
589b198e960a69fc9d71825fdf204cbbd59f39dd
|
[
"Apache-2.0"
] | null | null | null |
ironic_python_agent/openstack/common/ironic_lib/utils.py
|
faizan-barmawer/ironic-python-agent
|
589b198e960a69fc9d71825fdf204cbbd59f39dd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import errno
import logging
import os
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import excutils
from ironic_python_agent.openstack.common.ironic_lib import exception
from ironic_python_agent.openstack.common._i18n import _LE
from ironic_python_agent.openstack.common._i18n import _LW
# Configuration options for running privileged commands.
utils_opts = [
    cfg.StrOpt('rootwrap_config',
               default="",
               help='Path to the rootwrap configuration file to use for '
                    'running commands as root.'),
    # Help text fixed: it was a copy-paste of rootwrap_config's and wrongly
    # described this option as a configuration-file path.
    cfg.StrOpt('rootwrap_helper_cmd',
               default="",
               help='Rootwrap helper command to prefix when running '
                    'commands as root.'),
    cfg.StrOpt('tempdir',
               help='Explicitly specify the temporary working directory.'),
]
# Register this module's options on the global config and set up logging.
CONF = cfg.CONF
CONF.register_opts(utils_opts)
LOG = logging.getLogger(__name__)
def _get_root_helper():
    """Assemble the root-helper command line: wrapper command + config path."""
    parts = (CONF.rootwrap_helper_cmd, CONF.rootwrap_config)
    return '%s %s' % parts
def execute(*cmd, **kwargs):
    """Convenience wrapper around oslo's execute() method.

    :param cmd: Passed to processutils.execute.
    :param use_standard_locale: True | False. Defaults to False. If set to
                                True, execute command with standard locale
                                added to environment variables.
    :returns: (stdout, stderr) from process execution
    :raises: UnknownArgumentError
    :raises: ProcessExecutionError
    """
    use_standard_locale = kwargs.pop('use_standard_locale', False)
    if use_standard_locale:
        # Force the C locale so command output is stable and parseable
        # regardless of the caller's environment.
        env = kwargs.pop('env_variables', os.environ.copy())
        env['LC_ALL'] = 'C'
        kwargs['env_variables'] = env
    if kwargs.get('run_as_root') and 'root_helper' not in kwargs:
        kwargs['root_helper'] = _get_root_helper()
    result = processutils.execute(*cmd, **kwargs)
    LOG.debug('Execution completed, command line is "%s"',
              ' '.join(map(str, cmd)))
    # Pass lazy %-style arguments instead of pre-formatting with ``%`` so the
    # strings are only rendered when debug logging is actually enabled.
    LOG.debug('Command stdout is: "%s"', result[0])
    LOG.debug('Command stderr is: "%s"', result[1])
    return result
def mkfs(fs, path, label=None):
    """Format a file or block device
    :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
               'btrfs', etc.)
    :param path: Path to file or block device to format
    :param label: Volume label to use
    :raises: exception.FileSystemNotSupported if the mkfs helper for *fs*
             cannot be found on the system.
    """
    if fs == 'swap':
        args = ['mkswap']
    else:
        args = ['mkfs', '-t', fs]
    # add -F to force no interactive execute on non-block device.
    if fs in ('ext3', 'ext4'):
        args.extend(['-F'])
    if label:
        # FAT-family tools take the label via -n; everything else uses -L.
        if fs in ('msdos', 'vfat'):
            label_opt = '-n'
        else:
            label_opt = '-L'
        args.extend([label_opt, label])
    args.append(path)
    try:
        # use_standard_locale keeps stderr in English so the ENOENT string
        # match below works reliably.
        execute(*args, run_as_root=True, use_standard_locale=True)
    except processutils.ProcessExecutionError as e:
        with excutils.save_and_reraise_exception() as ctx:
            # "No such file or directory" in stderr indicates the mkfs.<fs>
            # helper itself is missing, i.e. the filesystem is unsupported.
            if os.strerror(errno.ENOENT) in e.stderr:
                ctx.reraise = False
                LOG.exception(_LE('Failed to make file system. '
                                  'File system %s is not supported.'), fs)
                raise exception.FileSystemNotSupported(fs=fs)
            else:
                # Any other failure: log and let save_and_reraise_exception
                # re-raise the original ProcessExecutionError.
                LOG.exception(_LE('Failed to create a file system '
                                  'in %(path)s. Error: %(error)s'),
                              {'path': path, 'error': e})
def unlink_without_raise(path):
    """Delete *path*, ignoring a missing file and logging other OSErrors.

    :param path: filesystem path to remove.
    """
    try:
        os.unlink(path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # File already gone -- treat as success.
            return
        else:
            # LOG.warn is a deprecated alias of warning(); use warning().
            LOG.warning(_LW("Failed to unlink %(path)s, error: %(e)s"),
                        {'path': path, 'e': e})
def dd(src, dst, *args):
    """Execute dd from src to dst.
    :param src: the input file for dd command.
    :param dst: the output file for dd command.
    :param args: a tuple containing the arguments to be
                 passed to dd command.
    :raises: processutils.ProcessExecutionError if it failed
             to run the process.
    """
    LOG.debug("Starting dd process.")
    cmd = ['dd', 'if={}'.format(src), 'of={}'.format(dst)]
    cmd.extend(args)
    execute(*cmd, run_as_root=True, check_exit_code=[0])
def is_http_url(url):
    """Return True if *url* uses the http or https scheme, case-insensitively."""
    return url.lower().startswith(('http://', 'https://'))
| 34.805369
| 78
| 0.625916
|
d10e8863134faadc53cd2057a122c4e6b512fc9b
| 5,698
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20200801/get_route_filter_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200801/get_route_filter_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200801/get_route_filter_rule.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetRouteFilterRuleResult',
'AwaitableGetRouteFilterRuleResult',
'get_route_filter_rule',
]
@pulumi.output_type
class GetRouteFilterRuleResult:
    """
    Route Filter Rule Resource.
    """
    # NOTE: generated by the Pulumi SDK Generator -- keep logic changes to the
    # generator, not this file. Values are stored via pulumi.set and exposed
    # through the read-only @pulumi.getter properties below.
    def __init__(__self__, access=None, communities=None, etag=None, id=None, location=None, name=None, provisioning_state=None, route_filter_rule_type=None):
        if access and not isinstance(access, str):
            raise TypeError("Expected argument 'access' to be a str")
        pulumi.set(__self__, "access", access)
        if communities and not isinstance(communities, list):
            raise TypeError("Expected argument 'communities' to be a list")
        pulumi.set(__self__, "communities", communities)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if route_filter_rule_type and not isinstance(route_filter_rule_type, str):
            raise TypeError("Expected argument 'route_filter_rule_type' to be a str")
        pulumi.set(__self__, "route_filter_rule_type", route_filter_rule_type)
    @property
    @pulumi.getter
    def access(self) -> str:
        """
        The access type of the rule.
        """
        return pulumi.get(self, "access")
    @property
    @pulumi.getter
    def communities(self) -> Sequence[str]:
        """
        The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020'].
        """
        return pulumi.get(self, "communities")
    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the route filter rule resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="routeFilterRuleType")
    def route_filter_rule_type(self) -> str:
        """
        The rule type of the rule.
        """
        return pulumi.get(self, "route_filter_rule_type")
class AwaitableGetRouteFilterRuleResult(GetRouteFilterRuleResult):
    # Awaitable shim generated by Pulumi: the dead ``yield`` makes __await__ a
    # generator, so ``await result`` immediately returns a plain
    # GetRouteFilterRuleResult carrying the same field values.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetRouteFilterRuleResult(
            access=self.access,
            communities=self.communities,
            etag=self.etag,
            id=self.id,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            route_filter_rule_type=self.route_filter_rule_type)
def get_route_filter_rule(resource_group_name: Optional[str] = None,
                          route_filter_name: Optional[str] = None,
                          rule_name: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteFilterRuleResult:
    """
    Route Filter Rule Resource.
    :param str resource_group_name: The name of the resource group.
    :param str route_filter_name: The name of the route filter.
    :param str rule_name: The name of the rule.
    :param opts: Options controlling the Pulumi invoke (provider, version, ...).
    :returns: An awaitable result describing the matched route filter rule.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['routeFilterName'] = route_filter_name
    __args__['ruleName'] = rule_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20200801:getRouteFilterRule', __args__, opts=opts, typ=GetRouteFilterRuleResult).value
    return AwaitableGetRouteFilterRuleResult(
        access=__ret__.access,
        communities=__ret__.communities,
        etag=__ret__.etag,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        route_filter_rule_type=__ret__.route_filter_rule_type)
| 35.6125
| 158
| 0.647596
|
58f20085decd164e0fc59b01031be872e82942d8
| 1,216
|
py
|
Python
|
test/test_videos_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_videos_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_videos_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import embyapi
from embyapi.api.videos_service_api import VideosServiceApi # noqa: E501
from embyapi.rest import ApiException
class TestVideosServiceApi(unittest.TestCase):
    """VideosServiceApi unit test stubs"""
    # Swagger-generated stubs: every test body is currently ``pass`` and
    # therefore succeeds trivially; real request/response assertions still
    # need to be written.
    def setUp(self):
        # Fresh API client per test; no server connection is made here.
        self.api = VideosServiceApi()  # noqa: E501
    def tearDown(self):
        pass
    def test_delete_videos_by_id_alternatesources(self):
        """Test case for delete_videos_by_id_alternatesources
        Removes alternate video sources.  # noqa: E501
        """
        pass
    def test_get_videos_by_id_additionalparts(self):
        """Test case for get_videos_by_id_additionalparts
        Gets additional parts for a video.  # noqa: E501
        """
        pass
    def test_post_videos_mergeversions(self):
        """Test case for post_videos_mergeversions
        Merges videos into a single record  # noqa: E501
        """
        pass
if __name__ == '__main__':
unittest.main()
| 22.109091
| 73
| 0.676809
|
7a8ee979453b2a33c07f33d4054b781bb33c56d0
| 1,073
|
py
|
Python
|
peas/server/reconnect_task.py
|
13thProgression/peas-blockchain
|
8e058cbfe0c1ab73f7c1ec41bedb39071c63141c
|
[
"Apache-2.0"
] | 2
|
2021-08-16T17:45:07.000Z
|
2021-09-18T19:00:58.000Z
|
peas/server/reconnect_task.py
|
13thProgression/peas-blockchain
|
8e058cbfe0c1ab73f7c1ec41bedb39071c63141c
|
[
"Apache-2.0"
] | 4
|
2021-09-26T15:50:20.000Z
|
2021-10-06T06:18:51.000Z
|
peas/server/reconnect_task.py
|
13thProgression/peas-blockchain
|
8e058cbfe0c1ab73f7c1ec41bedb39071c63141c
|
[
"Apache-2.0"
] | 3
|
2021-09-29T19:08:41.000Z
|
2022-03-15T08:47:28.000Z
|
import asyncio
import socket
from peas.server.server import PeasServer
from peas.types.peer_info import PeerInfo
def start_reconnect_task(server: PeasServer, peer_info_arg: PeerInfo, log, auth: bool):
    """
    Start a background task that checks connection and reconnects periodically to a peer.

    :param server: server whose connections are monitored and reconnected.
    :param peer_info_arg: target peer; its hostname is resolved once up front.
    :param log: logger for progress and failure messages.
    :param auth: whether to authenticate when (re)connecting.
    :return: the created asyncio task; keep a reference so it is not GC'd.
    """
    # Resolve the hostname once so each poll compares against a stable address.
    peer_info = PeerInfo(socket.gethostbyname(peer_info_arg.host), peer_info_arg.port)

    async def connection_check():
        while True:
            # Iterate values directly (keys were unused) and stop at the
            # first match instead of scanning every connection with a flag.
            connected = any(
                conn.get_peer_info() in (peer_info, peer_info_arg)
                for conn in server.all_connections.values()
            )
            if not connected:
                log.info(f"Reconnecting to peer {peer_info}")
                try:
                    await server.start_client(peer_info, None, auth=auth)
                except Exception as e:
                    log.info(f"Failed to connect to {peer_info} {e}")
            await asyncio.sleep(3)

    return asyncio.create_task(connection_check())
| 37
| 106
| 0.643989
|
51148bca23f588e0b7392640d1c53e0333ab77fe
| 1,134
|
py
|
Python
|
speakeasy/winenv/defs/windows/netapi32.py
|
mwilliams31/speakeasy
|
7b75f767017f665d66e6736f9f42cd32c1f9476d
|
[
"MIT"
] | null | null | null |
speakeasy/winenv/defs/windows/netapi32.py
|
mwilliams31/speakeasy
|
7b75f767017f665d66e6736f9f42cd32c1f9476d
|
[
"MIT"
] | null | null | null |
speakeasy/winenv/defs/windows/netapi32.py
|
mwilliams31/speakeasy
|
7b75f767017f665d66e6736f9f42cd32c1f9476d
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2021 FireEye, Inc. All Rights Reserved.
from speakeasy.struct import EmuStruct, Ptr
import ctypes as ct
NERR_Success = 0
ERROR_INVALID_LEVEL = 0x7C
class WKSTA_INFO_100(EmuStruct):
    # Emulated counterpart of the Win32 WKSTA_INFO_100 structure: platform id,
    # pointers to the computer name / LAN group strings, and OS version.
    # Field order defines the in-memory layout -- do not reorder.
    def __init__(self, ptr_size):
        super().__init__(ptr_size)
        self.wki_platform_id = Ptr
        self.wki_computername = Ptr
        self.wki_langroup = Ptr
        self.wki_ver_major = ct.c_uint32
        self.wki_ver_minor = ct.c_uint32
class WKSTA_INFO_101(EmuStruct):
    # WKSTA_INFO_100 layout plus a trailing pointer to the LAN root path
    # (mirrors the Win32 WKSTA_INFO_101 structure). Field order matters.
    def __init__(self, ptr_size):
        super().__init__(ptr_size)
        self.wki_platform_id = Ptr
        self.wki_computername = Ptr
        self.wki_langroup = Ptr
        self.wki_ver_major = ct.c_uint32
        self.wki_ver_minor = ct.c_uint32
        self.wki_lanroot = Ptr
class WKSTA_INFO_102(EmuStruct):
    # WKSTA_INFO_101 layout plus the logged-on user count (mirrors the Win32
    # WKSTA_INFO_102 structure). Field order matters.
    def __init__(self, ptr_size):
        super().__init__(ptr_size)
        self.wki_platform_id = Ptr
        self.wki_computername = Ptr
        self.wki_langroup = Ptr
        self.wki_ver_major = ct.c_uint32
        self.wki_ver_minor = ct.c_uint32
        self.wki_lanroot = Ptr
        self.wki_logged_on_users = Ptr
| 27.658537
| 55
| 0.671958
|
eb943ea0b3ef035285705a00961cc5940adf0958
| 25,453
|
py
|
Python
|
src/tests/conftest.py
|
wjwoodson/pretalx
|
935d0295e408a3518bf472147cc16722c44fe3fb
|
[
"Apache-2.0"
] | 1
|
2021-06-19T02:33:29.000Z
|
2021-06-19T02:33:29.000Z
|
src/tests/conftest.py
|
AKJK-Internet-32002/pretalx
|
062177a8a361dd57a8889e2dd422beec505881a9
|
[
"Apache-2.0"
] | null | null | null |
src/tests/conftest.py
|
AKJK-Internet-32002/pretalx
|
062177a8a361dd57a8889e2dd422beec505881a9
|
[
"Apache-2.0"
] | null | null | null |
import datetime as dt
import pytest
import pytz
from django.core import management
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils.timezone import now
from django_scopes import scope, scopes_disabled
from pretalx.event.models import Event, Organiser, Team, TeamInvite
from pretalx.mail.models import MailTemplate
from pretalx.person.models import SpeakerInformation, SpeakerProfile, User
from pretalx.schedule.models import Availability, Room, TalkSlot
from pretalx.submission.models import (
Answer,
AnswerOption,
Feedback,
Question,
QuestionVariant,
Resource,
Review,
Submission,
SubmissionType,
SubmitterAccessCode,
Track,
)
@pytest.fixture(scope="session", autouse=True)
def collect_static(request):
    # Runs once per test session: collect static assets up front so template
    # rendering that references them does not fail mid-suite.
    management.call_command("collectstatic", "--noinput", "--clear")
@pytest.fixture
def template_patch(monkeypatch):
# Patch out template rendering for performance improvements
monkeypatch.setattr(
"django.template.backends.django.Template.render",
lambda *args, **kwargs: "mocked template",
)
@pytest.fixture
def organiser():
    """Organiser with three teams: full orga, orga+reviewer, reviewer-only."""
    with scopes_disabled():
        org = Organiser.objects.create(name="Super Organiser", slug="superorganiser")
        full_permissions = dict(
            can_create_events=True,
            can_change_teams=True,
            can_change_organiser_settings=True,
            can_change_event_settings=True,
            can_change_submissions=True,
        )
        Team.objects.create(name="Organisers", organiser=org, **full_permissions)
        Team.objects.create(
            name="Organisers and reviewers",
            organiser=org,
            is_reviewer=True,
            **full_permissions,
        )
        Team.objects.create(name="Reviewers", organiser=org, is_reviewer=True)
        return org
@pytest.fixture
def other_organiser():
    """A second organiser with the same three-team setup as ``organiser``."""
    with scopes_disabled():
        org = Organiser.objects.create(name="Different Organiser", slug="diffo")
        full_permissions = dict(
            can_create_events=True,
            can_change_teams=True,
            can_change_organiser_settings=True,
            can_change_event_settings=True,
            can_change_submissions=True,
        )
        Team.objects.create(name="Organisers", organiser=org, **full_permissions)
        Team.objects.create(
            name="Organisers and reviewers",
            organiser=org,
            is_reviewer=True,
            **full_permissions,
        )
        Team.objects.create(name="Reviewers", organiser=org, is_reviewer=True)
        return org
@pytest.fixture
def event(organiser):
today = dt.date.today()
with scopes_disabled():
event = Event.objects.create(
name="Fancy testevent",
is_public=True,
slug="test",
email="orga@orga.org",
date_from=today,
date_to=today + dt.timedelta(days=3),
organiser=organiser,
)
# exporting takes quite some time, so this speeds up our tests
event.settings.export_html_on_schedule_release = False
for team in organiser.teams.all():
team.limit_events.add(event)
return event
@pytest.fixture
def other_event(other_organiser):
with scopes_disabled():
event = Event.objects.create(
name="Boring testevent",
is_public=True,
slug="other",
email="orga2@orga.org",
date_from=dt.date.today() + dt.timedelta(days=1),
date_to=dt.date.today() + dt.timedelta(days=1),
organiser=other_organiser,
)
event.settings.export_html_on_schedule_release = False
for team in other_organiser.teams.all():
team.limit_events.add(event)
return event
@pytest.fixture
def multilingual_event(organiser):
with scopes_disabled():
today = dt.date.today()
event = Event.objects.create(
name="Fancy testevent",
is_public=True,
slug="test2",
email="orga@orga.org",
date_from=today,
date_to=today + dt.timedelta(days=3),
locale_array="en,de",
organiser=organiser,
)
event.settings.export_html_on_schedule_release = False
for team in organiser.teams.all():
team.limit_events.add(event)
return event
@pytest.fixture
def resource(submission):
f = SimpleUploadedFile("testresource.txt", b"a resource")
with scope(event=submission.event):
return Resource.objects.create(
submission=submission, resource=f, description="Test resource"
)
@pytest.fixture
def confirmed_resource(confirmed_submission):
f = SimpleUploadedFile("confirmed_testresource.txt", b"a confirmed resource")
return Resource.objects.create(
submission=confirmed_submission,
resource=f,
description="Confirmed test resource",
)
@pytest.fixture
def other_resource(submission):
f = SimpleUploadedFile("testresource2.txt", b"another resource")
with scope(event=submission.event):
return Resource.objects.create(
submission=submission, resource=f, description="Test resource 2"
)
@pytest.fixture
def question(event):
with scope(event=event):
return Question.objects.create(
event=event,
question="How much do you like green, on a scale from 1-10?",
variant=QuestionVariant.NUMBER,
target="submission",
required=False,
contains_personal_data=False,
position=1,
)
@pytest.fixture
def inactive_question(event):
with scope(event=event):
return Question.objects.create(
event=event,
question="So, on a scale from 1–100, how much do you like red?",
variant=QuestionVariant.NUMBER,
target="submission",
required=False,
active=False,
position=2,
)
@pytest.fixture
def answer(event, submission, question):
with scope(event=event):
return Answer.objects.create(
answer="11", submission=submission, question=question
)
@pytest.fixture
def speaker_question(event):
with scope(event=event):
return Question.objects.create(
event=event,
question="What is your favourite color?",
variant=QuestionVariant.STRING,
target="speaker",
required=False,
position=3,
)
@pytest.fixture
def review_question(event):
with scope(event=event):
return Question.objects.create(
event=event,
question="What is your favourite color?",
variant=QuestionVariant.STRING,
target="reviewer",
required=True,
position=4,
)
@pytest.fixture
def speaker_boolean_question(event):
with scope(event=event):
return Question.objects.create(
event=event,
question="Do you like green?",
variant=QuestionVariant.BOOLEAN,
target="speaker",
required=False,
position=5,
)
@pytest.fixture
def boolean_question(event):
with scope(event=event):
return Question.objects.create(
event=event,
question="Do you like green?",
variant=QuestionVariant.BOOLEAN,
target="submission",
required=False,
position=6,
)
@pytest.fixture
def file_question(event):
with scope(event=event):
return Question.objects.create(
event=event,
question="Please submit your paper.",
variant=QuestionVariant.FILE,
target="submission",
required=False,
position=7,
)
@pytest.fixture
def speaker_file_question(event):
with scope(event=event):
return Question.objects.create(
event=event,
question="Please submit your CV.",
variant=QuestionVariant.FILE,
target="speaker",
required=False,
position=8,
)
@pytest.fixture
def choice_question(event):
with scope(event=event):
question = Question.objects.create(
event=event,
question="How much do you like green?",
variant=QuestionVariant.CHOICES,
target="speaker",
required=False,
position=9,
)
for answer in ["very", "incredibly", "omggreen"]:
AnswerOption.objects.create(question=question, answer=answer)
return question
@pytest.fixture
def answered_choice_question(choice_question, submission, speaker):
with scope(event=submission.event):
a = Answer.objects.create(
submission=submission, question=choice_question, person=speaker
)
a.options.set([choice_question.options.first()])
a.save()
return choice_question
@pytest.fixture
def multiple_choice_question(event):
with scope(event=event):
question = Question.objects.create(
event=event,
question="Which colors other than green do you like?",
variant=QuestionVariant.MULTIPLE,
target="speaker",
required=False,
position=10,
)
for answer in ["yellow", "blue", "black"]:
AnswerOption.objects.create(question=question, answer=answer)
return question
@pytest.fixture
def speaker_text_question(event):
with scope(event=event):
return Question.objects.create(
event=event,
question="Please elaborat on your like/dislike of green.",
variant=QuestionVariant.TEXT,
target="speaker",
required=False,
position=11,
)
@pytest.fixture
def personal_question(submission):
with scope(event=submission.event):
return Question.objects.create(
event=submission.event,
target="submission",
variant="boolean",
question="Do you identify as a hacker?",
contains_personal_data=True,
position=12,
)
@pytest.fixture
def impersonal_answer(question, speaker, submission):
with scope(event=question.event):
return Answer.objects.create(
answer="True", submission=submission, person=speaker, question=question
)
@pytest.fixture
def personal_answer(personal_question, speaker, submission):
with scope(event=personal_question.event):
return Answer.objects.create(
answer="True",
person=speaker,
question=personal_question,
submission=submission,
)
@pytest.fixture
def user():
with scopes_disabled():
return User.objects.create_user(
email="testuser@example.com", password="testpassw0rd!"
)
@pytest.fixture
def administrator():
    # Superuser with the pretalx administrator flag set.
    with scopes_disabled():
        u = User.objects.create_superuser(
            # NOTE(review): "examplecom" looks like a typo for "example.com";
            # left unchanged since tests may depend on the exact string.
            email="testuser@examplecom", password="testpassw0rd!"
        )
        u.is_administrator = True
        u.save()
        return u
@pytest.fixture
def orga_user(event):
with scopes_disabled():
user = User.objects.create_user(
password="orgapassw0rd", email="orgauser@orga.org"
)
team = event.organiser.teams.filter(
can_change_organiser_settings=True, is_reviewer=False
).first()
team.members.add(user)
team.save()
return user
@pytest.fixture
def other_orga_user(event):
with scopes_disabled():
user = User.objects.create_user(
password="orgapassw0rd", email="evilorgauser@orga.org"
)
team = event.organiser.teams.filter(
can_change_organiser_settings=True, is_reviewer=False
).first()
team.members.add(user)
team.save()
return user
@pytest.fixture
def review_user(organiser, event):
with scopes_disabled():
user = User.objects.create_user(
password="reviewpassw0rd", email="reviewuser@orga.org"
)
if not event.organiser:
event.organiser = organiser
event.save()
team, _ = event.organiser.teams.get_or_create(
can_change_organiser_settings=False, is_reviewer=True
)
team.members.add(user)
team.save()
return user
@pytest.fixture
def other_review_user(event):
with scopes_disabled():
user = User.objects.create_user(
password="reviewpassw0rd", email="evilreviewuser@orga.org"
)
team = event.organiser.teams.filter(
can_change_organiser_settings=False, is_reviewer=True
).first()
team.members.add(user)
team.save()
return user
@pytest.fixture
def orga_reviewer_user(event):
with scopes_disabled():
user = User.objects.create_user(
password="orgapassw0rd", email="multiuser@orga.org"
)
team = event.organiser.teams.filter(
can_change_organiser_settings=True, is_reviewer=True
).first()
team.members.add(user)
team.save()
return user
@pytest.fixture
def orga_client(orga_user, client):
client.force_login(orga_user)
return client
@pytest.fixture
def review_client(review_user, client):
client.force_login(review_user)
return client
@pytest.fixture
def other_review_client(other_review_user, client):
client.force_login(other_review_user)
return client
@pytest.fixture
def administrator_client(administrator, client):
client.force_login(administrator)
return client
@pytest.fixture
def submission_type(event):
with scope(event=event):
return SubmissionType.objects.create(
name="Workshop", event=event, default_duration=60
)
@pytest.fixture
def default_submission_type(event):
return event.cfp.default_type
@pytest.fixture
def speaker(event):
with scopes_disabled():
user = User.objects.create_user(
password="speakerpwd1!", name="Jane Speaker", email="jane@speaker.org"
)
with scope(event=event):
SpeakerProfile.objects.create(
user=user, event=event, biography="Best speaker in the world."
)
return user
@pytest.fixture
def speaker_client(client, speaker):
client.force_login(speaker)
return client
@pytest.fixture
def other_speaker(event):
with scopes_disabled():
user = User.objects.create_user(
email="speaker2@example.org", password="speakerpwd1!", name="Krümelmonster"
)
with scope(event=event):
SpeakerProfile.objects.create(user=user, event=event, biography="COOKIIIIES!!")
return user
@pytest.fixture
def other_speaker_client(client, other_speaker):
client.force_login(other_speaker)
return client
@pytest.fixture
def submission_data(event, submission_type):
return {
"title": "Lametta im Wandel der Zeiten",
"submission_type": submission_type,
"description": "Früher war es nämlich mehr. Und wir mussten es bügeln.",
"abstract": "Ich habe Quellen!",
"notes": "Und mein Enkel braucht auch noch ein Geschenk.",
"internal_notes": "Ich habe gestern mit dem Redner telefoniert. -- slubby",
"content_locale": "en",
"event": event,
}
@pytest.fixture
def submission(submission_data, speaker, event):
with scope(event=event):
sub = Submission.objects.create(**submission_data)
sub.save()
sub.speakers.add(speaker)
return sub
@pytest.fixture
def other_submission(event, other_speaker):
with scope(event=event):
sub = Submission.objects.create(
title="Albrecht Dürer. Sein Leben, seine Zeit",
event=event,
submission_type=event.cfp.default_type,
description="1 guter Talk",
abstract="Verstehste?",
notes="I like cookies A LOT",
content_locale="en",
)
sub.save()
sub.speakers.add(other_speaker)
return sub
@pytest.fixture
def accepted_submission(speaker, submission_data, event):
with scope(event=event):
sub = Submission.objects.create(**submission_data)
sub.save()
sub.speakers.add(speaker)
sub.accept()
return sub
@pytest.fixture
def other_accepted_submission(other_submission):
with scope(event=other_submission.event):
other_submission.accept()
return other_submission
@pytest.fixture
def rejected_submission(submission_data, other_speaker, event):
with scope(event=event):
sub = Submission.objects.create(**submission_data)
sub.save()
sub.speakers.add(other_speaker)
sub.reject()
return sub
@pytest.fixture
def confirmed_submission(submission_data, speaker, event):
with scope(event=event):
sub = Submission.objects.create(**submission_data)
sub.save()
sub.speakers.add(speaker)
sub.accept()
sub.confirm()
return sub
@pytest.fixture
def other_confirmed_submission(other_accepted_submission, event):
with scope(event=event):
other_accepted_submission.confirm()
return other_accepted_submission
@pytest.fixture
def canceled_submission(submission_data, speaker, event):
with scope(event=event):
sub = Submission.objects.create(**submission_data)
sub.save()
sub.speakers.add(speaker)
sub.cancel(force=True)
return sub
@pytest.fixture
def withdrawn_submission(submission_data, speaker, event):
with scope(event=event):
sub = Submission.objects.create(**submission_data)
sub.save()
sub.speakers.add(speaker)
sub.withdraw(force=True)
return sub
@pytest.fixture
def deleted_submission(event, submission_data, other_speaker):
with scope(event=event):
sub = Submission.objects.create(**submission_data)
sub.speakers.add(other_speaker)
sub.remove(force=True)
return sub
@pytest.fixture
def invitation(event):
with scope(event=event):
team = event.organiser.teams.filter(
can_change_organiser_settings=True, is_reviewer=False
).first()
return TeamInvite.objects.create(
team=team, token="testtoken", email="some@test.mail"
)
@pytest.fixture
def mail_template(event):
with scope(event=event):
return MailTemplate.objects.create(
event=event,
subject="Some Mail",
text="Whee mail content!",
reply_to="orga@orga.org",
)
@pytest.fixture(scope="function")
def mail(mail_template, speaker, event):
with scope(event=event):
return mail_template.to_mail(speaker, event)
@pytest.fixture(scope="function")
def other_mail(mail_template, event, speaker):
with scope(event=event):
return mail_template.to_mail(speaker, event)
@pytest.fixture
def sent_mail(mail_template, speaker, event):
with scope(event=event):
mail = mail_template.to_mail(speaker, event)
mail.send()
return mail
@pytest.fixture
def room(event):
with scope(event=event):
return Room.objects.create(
event=event,
name="Testroom",
description="A fancy room",
position=2,
capacity=50,
)
@pytest.fixture
def room_availability(event, room, availability):
with scope(event=event):
availability.room = room
availability.save()
return availability
@pytest.fixture
def other_room(event):
with scope(event=event):
return Room.objects.create(
event=event,
name="Second Testroom",
description="A less fancy room",
position=1,
capacity=10,
)
@pytest.fixture
def availability(event):
with scope(event=event):
return Availability(
event=event,
start=dt.datetime.combine(event.date_from, dt.time.min, tzinfo=pytz.utc),
end=dt.datetime.combine(event.date_to, dt.time.max, tzinfo=pytz.utc),
)
@pytest.fixture
def schedule(event):
with scope(event=event):
event.release_schedule("🍪 Version")
return event.current_schedule
@pytest.fixture
def slot(confirmed_submission, room, schedule):
    # Create matching talk slots for the confirmed submission in both the WIP
    # schedule and the released ``schedule``, then place all of them in
    # ``room`` as a 60-minute talk starting at the event start. Returns the
    # slot belonging to the *released* schedule.
    with scope(event=room.event):
        TalkSlot.objects.update_or_create(
            submission=confirmed_submission,
            schedule=room.event.wip_schedule,
            defaults={"is_visible": True},
        )
        TalkSlot.objects.update_or_create(
            submission=confirmed_submission,
            schedule=schedule,
            defaults={"is_visible": True},
        )
        # Bulk-update start/end/room on both slots at once.
        slots = TalkSlot.objects.filter(submission=confirmed_submission)
        slots.update(
            start=room.event.datetime_from,
            end=room.event.datetime_from + dt.timedelta(minutes=60),
            room=room,
        )
        return slots.get(schedule=schedule)
@pytest.fixture
def unreleased_slot(confirmed_submission, room):
with scope(event=room.event):
schedule = confirmed_submission.event.wip_schedule
slot = schedule.talks.filter(submission=confirmed_submission)
slot.update(
start=room.event.datetime_from,
end=room.event.datetime_from + dt.timedelta(minutes=30),
room=room,
schedule=schedule,
is_visible=True,
)
slot = slot.first()
return slot
@pytest.fixture
def past_slot(other_confirmed_submission, room, schedule, speaker):
with scope(event=room.event):
slot = (
other_confirmed_submission.slots.filter(schedule=schedule).first()
or other_confirmed_submission.slots.first()
)
slot.start = now() - dt.timedelta(minutes=60)
slot.end = now() - dt.timedelta(minutes=30)
slot.room = room
slot.schedule = schedule
slot.is_visible = True
slot.save()
return slot
@pytest.fixture
def break_slot(room, schedule):
with scope(event=schedule.event):
TalkSlot.objects.create(
description="Break",
schedule=schedule.event.wip_schedule,
)
slot = TalkSlot.objects.create(
description="Break",
schedule=schedule,
is_visible=True,
)
slots = TalkSlot.objects.filter(submission__isnull=True)
slots.update(
start=room.event.datetime_from + dt.timedelta(minutes=90),
end=room.event.datetime_from + dt.timedelta(minutes=120),
room=room,
)
return slot
@pytest.fixture
def canceled_talk(past_slot):
with scope(event=past_slot.submission.event):
past_slot.submission.cancel(force=True)
past_slot.submission.event.wip_schedule.freeze("vcanceled")
return past_slot
@pytest.fixture
def feedback(past_slot):
with scope(event=past_slot.submission.event):
return Feedback.objects.create(talk=past_slot.submission, review="I liked it!")
@pytest.fixture
def other_slot(other_confirmed_submission, room, schedule):
    """A visible slot for *other_confirmed_submission*, 60-90 minutes into the event."""
    begin = room.event.datetime_from + dt.timedelta(minutes=60)
    with scope(event=room.event):
        return TalkSlot.objects.create(
            submission=other_confirmed_submission,
            room=room,
            schedule=schedule,
            start=begin,
            end=begin + dt.timedelta(minutes=30),
            is_visible=True,
        )
@pytest.fixture
def schedule_schema():
    """An lxml XMLSchema validator for exported schedules, built from the fixture XSD."""
    from lxml import etree
    with open("tests/fixtures/schedule.xsd", "r") as xsd:
        document = etree.XML(xsd.read())
    return etree.XMLSchema(document)
@pytest.fixture
def review(submission, review_user):
    """A positive (score 1) review on *submission* by *review_user*."""
    with scope(event=submission.event):
        created = Review.objects.create(
            submission=submission,
            user=review_user,
            score=1,
            text="Looks great!",
        )
    return created
@pytest.fixture
def other_review(other_submission, other_review_user):
    """A negative (score 0) review on *other_submission* by *other_review_user*."""
    with scope(event=other_submission.event):
        created = Review.objects.create(
            submission=other_submission,
            user=other_review_user,
            score=0,
            text="Looks horrible!",
        )
    return created
@pytest.fixture
def information(event):
    """A SpeakerInformation note attached to *event*."""
    with scope(event=event):
        note = SpeakerInformation.objects.create(
            event=event,
            title="Information title",
            text="Important information",
        )
    return note
@pytest.fixture
def track(event):
    """A green 'Test Track' on *event*; also switches the event's track feature on."""
    with scope(event=event):
        event.settings.use_tracks = True
        created = Track.objects.create(event=event, name="Test Track", color="00ff00")
    return created
@pytest.fixture
def other_track(event):
    """A red 'Second Test Track' on *event*; also switches the track feature on."""
    with scope(event=event):
        event.settings.use_tracks = True
        created = Track.objects.create(
            event=event,
            name="Second Test Track",
            color="ff0000",
        )
    return created
@pytest.fixture
def access_code(event):
    """A fresh submitter access code for *event*."""
    with scope(event=event):
        code = SubmitterAccessCode.objects.create(event=event)
    return code
| 27.847921
| 87
| 0.636664
|
4f16eb57653ebd4735974abc23e2cfc478c28867
| 18,288
|
py
|
Python
|
app/ctaStrategy/ctaHistoryData.py
|
Michael-JC/inverstment_analysis
|
724bcda7606d3049e6299f7f3390872954f6628f
|
[
"Apache-2.0"
] | 1
|
2021-05-25T06:34:13.000Z
|
2021-05-25T06:34:13.000Z
|
app/ctaStrategy/ctaHistoryData.py
|
Michael-JC/inverstment_analysis
|
724bcda7606d3049e6299f7f3390872954f6628f
|
[
"Apache-2.0"
] | null | null | null |
app/ctaStrategy/ctaHistoryData.py
|
Michael-JC/inverstment_analysis
|
724bcda7606d3049e6299f7f3390872954f6628f
|
[
"Apache-2.0"
] | null | null | null |
# encoding: UTF-8
"""
本模块中主要包含:
1. 从通联数据下载历史行情的引擎
2. 用来把MultiCharts导出的历史数据载入到MongoDB中用的函数
3. 增加从通达信导出的历史数据载入到MongoDB中的函数
"""
from datetime import datetime, timedelta
from time import time
from multiprocessing.pool import ThreadPool
import pymongo
from vnpy.data.datayes import DatayesApi
from trader.vtFunction import globalSetting
from trader.vtObject import VtBarData
from language.chinese.vt_constant import *
from .ctaBase import SETTING_DB_NAME, TICK_DB_NAME, MINUTE_DB_NAME, DAILY_DB_NAME
# Exchange-code mapping between vn.trader constants and Datayes API codes.
VT_TO_DATAYES_EXCHANGE = {}
VT_TO_DATAYES_EXCHANGE[EXCHANGE_CFFEX] = 'CCFX'        # China Financial Futures Exchange
VT_TO_DATAYES_EXCHANGE[EXCHANGE_SHFE] = 'XSGE'         # Shanghai Futures Exchange
VT_TO_DATAYES_EXCHANGE[EXCHANGE_CZCE] = 'XZCE'         # Zhengzhou Commodity Exchange
VT_TO_DATAYES_EXCHANGE[EXCHANGE_DCE] = 'XDCE'          # Dalian Commodity Exchange
DATAYES_TO_VT_EXCHANGE = {v:k for k,v in VT_TO_DATAYES_EXCHANGE.items()}
########################################################################
class HistoryDataEngine(object):
    """History data engine used by the CTA module.

    Downloads daily and intraday futures/equity bars from the Datayes web
    API (or tushare) and upserts them into MongoDB collections keyed by
    contract symbol, with a unique index on the bar datetime.
    """
    #----------------------------------------------------------------------
    def __init__(self, token):
        """Constructor.

        token -- Datayes API access token.
        """
        self.dbClient = pymongo.MongoClient(globalSetting['mongoHost'], globalSetting['mongoPort'])
        self.datayesClient = DatayesApi(token)
    #----------------------------------------------------------------------
    def lastTradeDate(self):
        """Return the most recent trade date as 'YYYYMMDD'.

        Only weekends are handled; Chinese public holidays are not checked.
        """
        today = datetime.now()
        oneday = timedelta(1)
        # Saturday -> roll back one day to Friday.
        if today.weekday() == 5:
            today = today - oneday
        # Sunday -> roll back two days to Friday.
        elif today.weekday() == 6:
            today = today - oneday*2
        return today.strftime('%Y%m%d')
    #----------------------------------------------------------------------
    def readFuturesProductSymbol(self):
        """Query all futures *product* codes stored in MongoDB."""
        cx = self.dbClient[SETTING_DB_NAME]['FuturesSymbol'].find()
        # A set, because product codes repeat across individual contracts.
        return set([d['productSymbol'] for d in cx])
    #----------------------------------------------------------------------
    def readFuturesSymbol(self):
        """Query all futures *contract* codes stored in MongoDB."""
        cx = self.dbClient[SETTING_DB_NAME]['FuturesSymbol'].find()
        # A plain list this time: contract codes are unique.
        return [d['symbol'] for d in cx]
    #----------------------------------------------------------------------
    def downloadFuturesSymbol(self, tradeDate=''):
        """Download all futures contract codes traded on *tradeDate*.

        tradeDate -- 'YYYYMMDD'; empty string means the last trade date.
        """
        if not tradeDate:
            tradeDate = self.lastTradeDate()
        self.dbClient[SETTING_DB_NAME]['FuturesSymbol'].ensure_index([('symbol', pymongo.ASCENDING)],
                                                                     unique=True)
        path = 'api/market/getMktMFutd.json'
        params = {}
        params['tradeDate'] = tradeDate
        data = self.datayesClient.downloadData(path, params)
        if data:
            for d in data:
                symbolDict = {}
                symbolDict['symbol'] = d['ticker']
                symbolDict['productSymbol'] = d['contractObject']
                flt = {'symbol': d['ticker']}
                self.dbClient[SETTING_DB_NAME]['FuturesSymbol'].update_one(flt, {'$set':symbolDict},
                                                                           upsert=True)
            print u'期货合约代码下载完成'
        else:
            print u'期货合约代码下载失败'
    #----------------------------------------------------------------------
    def downloadFuturesDailyBar(self, symbol):
        """
        Download daily bars for a futures contract into the daily DB.

        symbol -- contract code; a code ending in 0000 (e.g. IF0000) means
        the continuous (dominant) contract.
        """
        print u'开始下载%s日行情' %symbol
        # Find the last stored date so the download is incremental.
        cl = self.dbClient[DAILY_DB_NAME][symbol]
        cx = cl.find(sort=[('datetime', pymongo.DESCENDING)])
        if cx.count():
            last = cx[0]
        else:
            last = ''
        # Dominant (continuous) contract
        if '0000' in symbol:
            path = 'api/market/getMktMFutd.json'
            params = {}
            params['contractObject'] = symbol.replace('0000', '')
            params['mainCon'] = 1
            if last:
                params['startDate'] = last['date']
        # Specific tradable contract
        else:
            path = 'api/market/getMktFutd.json'
            params = {}
            params['ticker'] = symbol
            if last:
                params['startDate'] = last['date']
        # Download the data
        data = self.datayesClient.downloadData(path, params)
        if data:
            # Create the unique datetime index before upserting.
            self.dbClient[DAILY_DB_NAME][symbol].ensure_index([('datetime', pymongo.ASCENDING)],
                                                              unique=True)
            for d in data:
                bar = VtBarData()
                bar.vtSymbol = symbol
                bar.symbol = symbol
                try:
                    bar.exchange = DATAYES_TO_VT_EXCHANGE.get(d.get('exchangeCD', ''), '')
                    bar.open = d.get('openPrice', 0)
                    bar.high = d.get('highestPrice', 0)
                    bar.low = d.get('lowestPrice', 0)
                    bar.close = d.get('closePrice', 0)
                    bar.date = d.get('tradeDate', '').replace('-', '')
                    bar.time = ''
                    bar.datetime = datetime.strptime(bar.date, '%Y%m%d')
                    bar.volume = d.get('turnoverVol', 0)
                    bar.openInterest = d.get('openInt', 0)
                except KeyError:
                    print d
                flt = {'datetime': bar.datetime}
                self.dbClient[DAILY_DB_NAME][symbol].update_one(flt, {'$set':bar.__dict__}, upsert=True)
            print u'%s下载完成' %symbol
        else:
            print u'找不到合约%s' %symbol
    #----------------------------------------------------------------------
    def downloadAllFuturesDailyBar(self):
        """Download dominant-contract daily bars for every futures product."""
        start = time()
        print u'开始下载所有期货的主力合约日行情'
        productSymbolSet = self.readFuturesProductSymbol()
        print u'代码列表读取成功,产品代码:%s' %productSymbolSet
        # A thread pool was also tried here, but the download involves heavy
        # data-format conversion (CPU bound), so multithreading did not
        # measurably change throughput.
        #p = ThreadPool(10)
        #p.map(self.downloadFuturesDailyBar, productSymbolSet)
        #p.close()
        #p.join()
        for productSymbol in productSymbolSet:
            self.downloadFuturesDailyBar(productSymbol+'0000')
        print u'所有期货的主力合约日行情已经全部下载完成, 耗时%s秒' %(time()-start)
    #----------------------------------------------------------------------
    def downloadFuturesIntradayBar(self, symbol):
        """Download today's 1-minute intraday bars for a futures contract."""
        print u'开始下载%s日内分钟行情' %symbol
        # Intraday bars are only available for specific contracts.
        path = 'api/market/getFutureBarRTIntraDay.json'
        params = {}
        params['instrumentID'] = symbol
        params['unit'] = 1
        data = self.datayesClient.downloadData(path, params)
        if data:
            today = datetime.now().strftime('%Y%m%d')
            # Create the unique datetime index before upserting.
            self.dbClient[MINUTE_DB_NAME][symbol].ensure_index([('datetime', pymongo.ASCENDING)],
                                                               unique=True)
            for d in data:
                bar = VtBarData()
                bar.vtSymbol = symbol
                bar.symbol = symbol
                try:
                    bar.exchange = DATAYES_TO_VT_EXCHANGE.get(d.get('exchangeCD', ''), '')
                    bar.open = d.get('openPrice', 0)
                    bar.high = d.get('highestPrice', 0)
                    bar.low = d.get('lowestPrice', 0)
                    bar.close = d.get('closePrice', 0)
                    bar.date = today
                    bar.time = d.get('barTime', '')
                    bar.datetime = datetime.strptime(bar.date + ' ' + bar.time, '%Y%m%d %H:%M')
                    bar.volume = d.get('totalVolume', 0)
                    # Open interest is not provided by this endpoint.
                    bar.openInterest = 0
                except KeyError:
                    print d
                flt = {'datetime': bar.datetime}
                self.dbClient[MINUTE_DB_NAME][symbol].update_one(flt, {'$set':bar.__dict__}, upsert=True)
            print u'%s下载完成' %symbol
        else:
            print u'找不到合约%s' %symbol
    #----------------------------------------------------------------------
    def downloadEquitySymbol(self, tradeDate=''):
        """Download all equity ticker codes traded on *tradeDate* ('' = last trade date)."""
        if not tradeDate:
            tradeDate = self.lastTradeDate()
        self.dbClient[SETTING_DB_NAME]['EquitySymbol'].ensure_index([('symbol', pymongo.ASCENDING)],
                                                                    unique=True)
        path = 'api/market/getMktEqud.json'
        params = {}
        params['tradeDate'] = tradeDate
        data = self.datayesClient.downloadData(path, params)
        if data:
            for d in data:
                symbolDict = {}
                symbolDict['symbol'] = d['ticker']
                flt = {'symbol': d['ticker']}
                self.dbClient[SETTING_DB_NAME]['EquitySymbol'].update_one(flt, {'$set':symbolDict},
                                                                          upsert=True)
            print u'股票代码下载完成'
        else:
            print u'股票代码下载失败'
    #----------------------------------------------------------------------
    def downloadEquityDailyBar(self, symbol):
        """
        Download daily bars for an equity from Datayes.

        symbol -- the stock ticker code.
        """
        print u'开始下载%s日行情' %symbol
        # Find the last stored date so the download is incremental.
        cl = self.dbClient[DAILY_DB_NAME][symbol]
        cx = cl.find(sort=[('datetime', pymongo.DESCENDING)])
        if cx.count():
            last = cx[0]
        else:
            last = ''
        # Download the data
        path = 'api/market/getMktEqud.json'
        params = {}
        params['ticker'] = symbol
        if last:
            params['beginDate'] = last['date']
        data = self.datayesClient.downloadData(path, params)
        if data:
            # Create the unique datetime index before upserting.
            self.dbClient[DAILY_DB_NAME][symbol].ensure_index([('datetime', pymongo.ASCENDING)],
                                                              unique=True)
            for d in data:
                bar = VtBarData()
                bar.vtSymbol = symbol
                bar.symbol = symbol
                try:
                    bar.exchange = DATAYES_TO_VT_EXCHANGE.get(d.get('exchangeCD', ''), '')
                    bar.open = d.get('openPrice', 0)
                    bar.high = d.get('highestPrice', 0)
                    bar.low = d.get('lowestPrice', 0)
                    bar.close = d.get('closePrice', 0)
                    bar.date = d.get('tradeDate', '').replace('-', '')
                    bar.time = ''
                    bar.datetime = datetime.strptime(bar.date, '%Y%m%d')
                    bar.volume = d.get('turnoverVol', 0)
                except KeyError:
                    print d
                flt = {'datetime': bar.datetime}
                self.dbClient[DAILY_DB_NAME][symbol].update_one(flt, {'$set':bar.__dict__}, upsert=True)
            print u'%s下载完成' %symbol
        else:
            print u'找不到合约%s' %symbol
    #----------------------------------------------------------------------
    def downloadEquityDailyBarts(self, symbol):
        """
        Download daily bars for an equity via tushare.

        symbol -- the stock ticker code.
        """
        print u'开始下载%s日行情' %symbol
        # Find the last stored date so the download is incremental.
        cl = self.dbClient[DAILY_DB_NAME][symbol]
        cx = cl.find(sort=[('datetime', pymongo.DESCENDING)])
        if cx.count():
            last = cx[0]
        else:
            last = ''
        # Download the data
        import tushare as ts
        if last:
            start = last['date'][:4]+'-'+last['date'][4:6]+'-'+last['date'][6:]
        # NOTE(review): when the collection is empty, ``last`` is '' and
        # ``start`` is never assigned, so the next line raises NameError on
        # a first-time download -- verify intended behaviour.
        data = ts.get_k_data(symbol,start)
        if not data.empty:
            # Create the unique datetime index before upserting.
            self.dbClient[DAILY_DB_NAME][symbol].ensure_index([('datetime', pymongo.ASCENDING)],
                                                              unique=True)
            for index, d in data.iterrows():
                bar = VtBarData()
                bar.vtSymbol = symbol
                bar.symbol = symbol
                try:
                    bar.open = d.get('open')
                    bar.high = d.get('high')
                    bar.low = d.get('low')
                    bar.close = d.get('close')
                    bar.date = d.get('date').replace('-', '')
                    bar.time = ''
                    bar.datetime = datetime.strptime(bar.date, '%Y%m%d')
                    bar.volume = d.get('volume')
                except KeyError:
                    print d
                flt = {'datetime': bar.datetime}
                self.dbClient[DAILY_DB_NAME][symbol].update_one(flt, {'$set':bar.__dict__}, upsert=True)
            print u'%s下载完成' %symbol
        else:
            print u'找不到合约%s' %symbol
#----------------------------------------------------------------------
def loadMcCsv(fileName, dbName, symbol):
    """Insert MultiCharts-exported CSV history into MongoDB.

    fileName -- path to the CSV file; dbName/symbol name the target collection.
    """
    import csv
    start = time()
    print u'开始读取CSV文件%s中的数据插入到%s的%s中' %(fileName, dbName, symbol)
    # Grab the target collection and make sure the unique datetime index exists.
    client = pymongo.MongoClient(globalSetting['mongoHost'], globalSetting['mongoPort'])
    collection = client[dbName][symbol]
    collection.ensure_index([('datetime', pymongo.ASCENDING)], unique=True)
    # Read the rows and upsert each bar keyed by its datetime.
    reader = csv.DictReader(file(fileName, 'r'))
    for d in reader:
        bar = VtBarData()
        bar.vtSymbol = symbol
        bar.symbol = symbol
        bar.open = float(d['Open'])
        bar.high = float(d['High'])
        bar.low = float(d['Low'])
        bar.close = float(d['Close'])
        bar.date = datetime.strptime(d['Date'], '%Y-%m-%d').strftime('%Y%m%d')
        bar.time = d['Time']
        bar.datetime = datetime.strptime(bar.date + ' ' + bar.time, '%Y%m%d %H:%M:%S')
        # NOTE(review): volume is stored as the raw CSV string, not a number.
        bar.volume = d['TotalVolume']
        flt = {'datetime': bar.datetime}
        collection.update_one(flt, {'$set':bar.__dict__}, upsert=True)
        print bar.date, bar.time
    print u'插入完毕,耗时:%s' % (time()-start)
#----------------------------------------------------------------------
def loadTbCsv(fileName, dbName, symbol):
    """Insert TradeBlazer-exported CSV minute bars into MongoDB.

    Columns by position: 0 datetime ('YYYY/MM/DD HH:MM'), 1-4 OHLC,
    5 volume, 6 open interest.
    """
    import csv
    start = time()
    print u'开始读取CSV文件%s中的数据插入到%s的%s中' %(fileName, dbName, symbol)
    # Grab the target collection and make sure the unique datetime index exists.
    client = pymongo.MongoClient(globalSetting['mongoHost'], globalSetting['mongoPort'])
    collection = client[dbName][symbol]
    collection.ensure_index([('datetime', pymongo.ASCENDING)], unique=True)
    # Read the rows and upsert each bar keyed by its datetime.
    reader = csv.reader(file(fileName, 'r'))
    for d in reader:
        bar = VtBarData()
        bar.vtSymbol = symbol
        bar.symbol = symbol
        bar.open = float(d[1])
        bar.high = float(d[2])
        bar.low = float(d[3])
        bar.close = float(d[4])
        bar.date = datetime.strptime(d[0].split(' ')[0], '%Y/%m/%d').strftime('%Y%m%d')
        # Append seconds so strptime below gets a full HH:MM:SS.
        bar.time = d[0].split(' ')[1]+":00"
        bar.datetime = datetime.strptime(bar.date + ' ' + bar.time, '%Y%m%d %H:%M:%S')
        bar.volume = d[5]
        bar.openInterest = d[6]
        flt = {'datetime': bar.datetime}
        collection.update_one(flt, {'$set':bar.__dict__}, upsert=True)
        print bar.date, bar.time
    print u'插入完毕,耗时:%s' % (time()-start)
#----------------------------------------------------------------------
def loadTbPlusCsv(fileName, dbName, symbol):
    """Insert TB (speed edition)-exported CSV minute bars into MongoDB.

    Columns by position: 0 date ('YYYYMMDD'), 1 time as a day-fraction-style
    float, 2-5 OHLC, 6 volume, 7 open interest.
    """
    import csv
    start = time()
    print u'开始读取CSV文件%s中的数据插入到%s的%s中' %(fileName, dbName, symbol)
    # Grab the target collection and make sure the unique datetime index exists.
    client = pymongo.MongoClient(globalSetting['mongoHost'], globalSetting['mongoPort'])
    collection = client[dbName][symbol]
    collection.ensure_index([('datetime', pymongo.ASCENDING)], unique=True)
    # Read the rows and upsert each bar keyed by its datetime.
    reader = csv.reader(file(fileName, 'r'))
    for d in reader:
        bar = VtBarData()
        bar.vtSymbol = symbol
        bar.symbol = symbol
        bar.open = float(d[2])
        bar.high = float(d[3])
        bar.low = float(d[4])
        bar.close = float(d[5])
        bar.date = str(d[0])
        # Scale the fractional time (e.g. 0.0930) to a 4-digit HHMM string --
        # presumably column 1 encodes HH.MM/100; verify against an export.
        tempstr=str(round(float(d[1])*10000)).split(".")[0].zfill(4)
        bar.time = tempstr[:2]+":"+tempstr[2:4]+":00"
        bar.datetime = datetime.strptime(bar.date + ' ' + bar.time, '%Y%m%d %H:%M:%S')
        bar.volume = d[6]
        bar.openInterest = d[7]
        flt = {'datetime': bar.datetime}
        collection.update_one(flt, {'$set':bar.__dict__}, upsert=True)
        print bar.date, bar.time
    print u'插入完毕,耗时:%s' % (time()-start)
#----------------------------------------------------------------------
def loadTdxCsv(fileName, dbName, symbol):
    """Insert TDX (通达信)-exported CSV minute bars into MongoDB.

    Columns by position: 0 date ('YYYY/MM/DD'), 1 time ('HHMM'),
    2-5 OHLC, 6 volume, 7 open interest.
    """
    import csv
    start = time()
    print u'开始读取CSV文件%s中的数据插入到%s的%s中' %(fileName, dbName, symbol)
    # Grab the target collection and make sure the unique datetime index exists.
    client = pymongo.MongoClient(globalSetting['mongoHost'], globalSetting['mongoPort'])
    collection = client[dbName][symbol]
    collection.ensure_index([('datetime', pymongo.ASCENDING)], unique=True)
    # Read the rows and upsert each bar keyed by its datetime.
    reader = csv.reader(file(fileName, 'r'))
    for d in reader:
        bar = VtBarData()
        bar.vtSymbol = symbol
        bar.symbol = symbol
        bar.open = float(d[2])
        bar.high = float(d[3])
        bar.low = float(d[4])
        bar.close = float(d[5])
        bar.date = datetime.strptime(d[0], '%Y/%m/%d').strftime('%Y%m%d')
        # Reformat 'HHMM' into 'HH:MM:00'.
        bar.time = d[1][:2]+':'+d[1][2:4]+':00'
        bar.datetime = datetime.strptime(bar.date + ' ' + bar.time, '%Y%m%d %H:%M:%S')
        bar.volume = d[6]
        bar.openInterest = d[7]
        flt = {'datetime': bar.datetime}
        collection.update_one(flt, {'$set':bar.__dict__}, upsert=True)
        print bar.date, bar.time
    print u'插入完毕,耗时:%s' % (time()-start)
| 36
| 117
| 0.481026
|
09a5ae08d304994c4bd6f597cd6882012a6c7cdd
| 5,892
|
py
|
Python
|
app/cogs/owner/owner_commands.py
|
tayron1/Starboard-2
|
ff83e33e026107d94c306250827373ff2c32aa7d
|
[
"MIT"
] | null | null | null |
app/cogs/owner/owner_commands.py
|
tayron1/Starboard-2
|
ff83e33e026107d94c306250827373ff2c32aa7d
|
[
"MIT"
] | null | null | null |
app/cogs/owner/owner_commands.py
|
tayron1/Starboard-2
|
ff83e33e026107d94c306250827373ff2c32aa7d
|
[
"MIT"
] | null | null | null |
import io
import textwrap
import time
import traceback
from contextlib import redirect_stdout
import discord
from asyncpg.exceptions import InterfaceError
from discord.ext import commands
from ... import checks, menus, utils
from ...classes.bot import Bot
class Owner(commands.Cog):
    """Owner-only commands: remote eval, SQL stats, restart, and query timing."""
    def __init__(self, bot: Bot) -> None:
        self.bot = bot
    @commands.command()
    @checks.is_owner()
    async def evall(self, ctx, *, body: str):
        """Evaluates code on all clusters and returns their response"""
        # Broadcast the snippet over the cluster websocket and gather replies.
        _msgs = await self.bot.websocket.send_command(
            "eval", {"content": body}, expect_resp=True
        )
        msgs = [f"```py\n{m['author']}: {m['data']}\n```" for m in _msgs]
        await ctx.send(" ".join(msgs))
    @commands.command(name="eval")
    @checks.is_owner()
    async def _eval(self, ctx, *, body: str):
        """Evaluates python code on the current cluster"""
        # Namespace exposed to the evaluated snippet; `_` holds the last result.
        env = {
            "bot": self.bot,
            "ctx": ctx,
            "channel": ctx.channel,
            "author": ctx.author,
            "guild": ctx.guild,
            "message": ctx.message,
            "_": self.bot._last_result,
        }
        env.update(globals())
        body = self.bot.cleanup_code(body)
        stdout = io.StringIO()
        # Wrap the snippet in an async function so `await` works inside it.
        to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
        try:
            exec(to_compile, env)
        except Exception as e:
            # Compilation/definition failed -- report and stop.
            return await ctx.send(f"```py\n{e.__class__.__name__}: {e}\n```")
        func = env["func"]
        try:
            # Capture anything the snippet prints.
            with redirect_stdout(stdout):
                ret = await func()
        except Exception:
            value = stdout.getvalue()
            await ctx.send(f"```py\n{value}{traceback.format_exc()}\n```")
        else:
            value = stdout.getvalue()
            # Confirm success with a check-mark reaction (best effort).
            try:
                await ctx.message.add_reaction("\u2705")
            except (discord.Forbidden, discord.NotFound):
                pass
            if ret is None:
                if value:
                    await ctx.send(f"```py\n{value}\n```")
            else:
                self.bot._last_result = ret
                await ctx.send(f"```py\n{value}{ret}\n```")
    @commands.command(name="sqltimes")
    @checks.is_owner()
    async def get_sql_times(
        self, ctx: commands.Context, sort_by: str = "total"
    ) -> None:
        """Shows stats on SQL queries, sorted by avg/total/exec count."""
        if sort_by not in ["avg", "total", "exec"]:
            # NOTE(review): "optons" typo in this user-facing message.
            await ctx.send("Valid optons are `avg`, `total`, and `exec`.")
            return
        # Collapse the per-query execution-time lists into (avg, total, count).
        times = []
        for sql in self.bot.db.sql_times:
            total_time = 0
            executions = 0
            for exec_time in self.bot.db.sql_times[sql]:
                executions += 1
                total_time += exec_time
            times.append(
                (sql, (total_time / executions, total_time, executions))
            )
        pag = commands.Paginator(prefix="", suffix="", max_size=1000)
        if len(times) == 0:
            await ctx.send("Nothing to show")
            return
        # Key selector matching the requested sort column.
        def sorter(t):
            if sort_by == "avg":
                return t[1][0]
            elif sort_by == "total":
                return t[1][1]
            elif sort_by == "exec":
                return t[1][2]
        times.sort(key=sorter, reverse=True)
        for sql, exec_time in times:
            pag.add_line(
                f"```{sql}```"
                f"{utils.ms(exec_time[0])} MS AVG | "
                f"{round(exec_time[1], 2)} SECONDS TOTAL | "
                f"{exec_time[2]} EXECUTIONS\n"
            )
        # One embed per paginator page, shown in an interactive menu.
        await menus.Paginator(
            embeds=[
                discord.Embed(
                    title="SQL Times",
                    description=p,
                    color=self.bot.theme_color,
                )
                for p in pag.pages
            ],
            delete_after=True,
        ).start(ctx)
    @commands.command(name="restart")
    @checks.is_owner()
    async def restart_bot(self, ctx: commands.Context) -> None:
        """Restarts all clusters"""
        if not await menus.Confirm("Restart all clusters?").start(ctx):
            await ctx.send("Cancelled")
            return
        await ctx.send("Restarting...")
        # Reuse the cluster-wide eval to log every cluster out; the process
        # manager is expected to bring them back up.
        cmd: commands.Command = self.bot.get_command("evall")
        await ctx.invoke(cmd, body="await bot.logout()")
    @commands.command(
        name="runpg",
        aliases=["timepg", "timeit", "runtime"],
        brief="Time postgres queries",
        description="Time postgres queries",
    )
    @checks.is_owner()
    async def time_postgres(self, ctx: commands.Context, *args: list) -> None:
        """Time each query argument; a bare integer argument sets the
        repetition count for the query that follows it. The transaction is
        always rolled back, so nothing is committed."""
        result = "None"
        times = 1
        runtimes = []
        try:
            async with self.bot.db.pool.acquire() as con:
                async with con.transaction():
                    for a in args:
                        a = "".join(a)
                        try:
                            # An integer argument sets the repeat count...
                            times = int(a)
                        except Exception:
                            # ...anything else is a query: run it `times` times.
                            start = time.time()
                            for i in range(0, times):
                                try:
                                    result = await con.fetch(a)
                                except Exception as e:
                                    await ctx.send(e)
                                    raise Exception("rollback")
                            runtimes.append((time.time() - start) / times)
                            times = 1
                    # Deliberately raise so the transaction rolls back.
                    raise Exception("Rollback")
        except (Exception, InterfaceError):
            pass
        for x, r in enumerate(runtimes):
            await ctx.send(f"Query {x} took {round(r*1000, 2)} ms")
        await ctx.send(result[0:500])
def setup(bot: Bot) -> None:
    """Entry point used by discord.py's extension loader to attach the cog."""
    cog = Owner(bot)
    bot.add_cog(cog)
| 31.508021
| 78
| 0.492702
|
b259bdd63388720b6b17c5711e3bfab2cb736754
| 1,981
|
py
|
Python
|
VPython/backup/7/HeliumAtom.py
|
programizer/HeliumAtom
|
01ba9cfe930f40c137b2232447fc4f67b61e9275
|
[
"MIT"
] | null | null | null |
VPython/backup/7/HeliumAtom.py
|
programizer/HeliumAtom
|
01ba9cfe930f40c137b2232447fc4f67b61e9275
|
[
"MIT"
] | null | null | null |
VPython/backup/7/HeliumAtom.py
|
programizer/HeliumAtom
|
01ba9cfe930f40c137b2232447fc4f67b61e9275
|
[
"MIT"
] | null | null | null |
# from math import *
# from visual import *
# from visual.graph import *
from vpython import *
# import vpython
scene.fullscreen = True
# Force constant for the Coulomb-style interaction (scaled units, not SI).
G = 0.5
# Nucleus (red, heavy, positive) plus two light negative electrons; the
# commented-out entries are alternative bodies kept for experimentation.
spheres = [
    sphere(pos=vector(0,0,0),radius =0.6,color=color.red,charge=sqrt(200),mass=7200,velocity=vector(0,0,0),a = vector(0,0,0),trail=curve(color=color.red)),
    sphere(pos=vector(10,0,0),radius=0.2,color=color.blue,charge=-sqrt(10),mass=1,velocity=vector(0,0,0.75),a=vector(0,0,0),trail=curve(color=color.blue)),
    #sphere(pos=vector(0,12,0),radius=.08,color=color.green,mass=sqrt(4),velocity=vector(1.2,0,0.6),a=vector(0,0,0),trail=curve(color=color.green)),
    sphere(pos=vector(0,5,0),radius=0.2,color=color.white,charge=-sqrt(10),mass=1,velocity=vector(1.5,0,0),a=vector(0,0,0),trail=curve(color=color.white)),
    #sphere(pos=vector(0,28,0),radius=.4,color=color.orange,mass=sqrt(80),velocity=vector(0.7,0,0.4),a=vector(0,0,0),trail=curve(color=color.orange)),
    #sphere(pos=vector(0,32,0),radius=0.2,color=color.white,mass=-sqrt(10),velocity=vector(1.5,0,0.4),a=vector(0,0,0),trail=curve(color=color.white))
]
#print(spheres[0].a)
#print(len(spheres))
def acceleration1on2(sphere2, sphere1):
    """Return the Coulomb-style acceleration that sphere1's charge imparts on sphere2."""
    separation = sphere2.pos - sphere1.pos
    distance = mag(separation)
    direction = norm(separation)
    # Signed scalar: negative for opposite charges, pulling sphere2 toward sphere1.
    magnitude = ((G * sphere1.charge * sphere2.charge) / (distance ** 2)) / sphere2.mass
    return magnitude * direction
t = 0
dt = .01
# Main animation loop: recompute pairwise accelerations, Euler-integrate,
# and keep the camera centered on the nucleus.
while 1:
    rate(100)
    # Accumulate the net acceleration on each sphere from every other sphere.
    for i in spheres:
        i.a = vector(0,0,0)
        # NOTE(review): soi is assigned but never used.
        soi = vector(0,0,0)
        for j in spheres:
            if i!=j:
                i.a = i.a + acceleration1on2(i,j)
    # Euler integration step: velocity then position.
    for i in spheres:
        #print(i.velocity)
        i.velocity = i.velocity + i.a *dt
        i.pos = i.pos+i.velocity*dt
        #print(i.a)
        i.trail.append(pos=i.pos)
    scene.center=vector(spheres[0].pos.x,spheres[0].pos.y,spheres[0].pos.z)
    # print(i.a)
| 31.951613
| 152
| 0.596668
|
7ade01ea7ac83b8e61dc4944ad49c197b8b4a198
| 4,568
|
py
|
Python
|
experiments/steven-images/ddpg_spatial.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
experiments/steven-images/ddpg_spatial.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
experiments/steven-images/ddpg_spatial.py
|
Asap7772/railrl_evalsawyer
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
[
"MIT"
] | null | null | null |
import rlkit.misc.hyperparameter as hyp
import rlkit.torch.pytorch_util as ptu
from rlkit.launchers.launcher_util import run_experiment
from torch import nn as nn
from rlkit.torch.networks.experimental import HuberLoss
from rlkit.torch.ddpg.feat_point_ddpg import FeatPointDDPG
from rlkit.torch.networks import FeatPointMlp
from rlkit.torch.networks import ConcatMlp, AETanhPolicy
from rlkit.exploration_strategies.gaussian_strategy import GaussianStrategy
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.envs.wrappers import ImageMujocoWithObsEnv
#from rlkit.images.camera import pusher_2d_init_camera
#from rlkit.envs.mujoco.simple_sawyer import SawyerXYZEnv
from rlkit.envs.mujoco.sawyer_gripper_env import SawyerXYZEnv
import rlkit.images.camera as camera
from rlkit.data_management.env_replay_buffer import AEEnvReplayBuffer
def experiment(variant):
    """Wire up the Sawyer image env, feature-point autoencoder, and DDPG, then train.

    variant -- dict whose 'algo_params' entry is forwarded to FeatPointDDPG.
    """
    # Spatial bottleneck: each feature point contributes an (x, y) pair per frame.
    feat_points = 16
    history = 1
    latent_obs_dim = feat_points * 2 * history
    imsize = 64
    downsampled_size = 32
    env = SawyerXYZEnv()
    # Raw state dim of the wrapped env, concatenated with the image latent.
    extra_fc_size = env.obs_dim
    env = ImageMujocoWithObsEnv(env,
                                imsize=imsize,
                                normalize=True,
                                grayscale=True,
                                keep_prev=history-1,
                                init_camera=camera.sawyer_init_camera)
    """env = ImageMujocoEnv(env,
                        imsize=imsize,
                        keep_prev=history-1,
                        init_camera=camera.sawyer_init_camera)"""
    es = GaussianStrategy(
        action_space=env.action_space,
    )
    # NOTE(review): obs_dim is computed but never used below.
    obs_dim = env.observation_space.low.size
    action_dim = env.action_space.low.size
    ae = FeatPointMlp(
        input_size=imsize,
        downsample_size=downsampled_size,
        input_channels=1,
        num_feat_points=feat_points
    )
    replay_buffer = AEEnvReplayBuffer(
        int(1e4),
        env,
        imsize=imsize,
        history_length=history,
        downsampled_size=downsampled_size
    )
    # Q-function consumes latent features + raw state + action.
    qf = ConcatMlp(
        input_size= latent_obs_dim + extra_fc_size + action_dim,
        output_size=1,
        hidden_sizes=[400, 300]
    )
    policy = AETanhPolicy(
        input_size=latent_obs_dim + extra_fc_size,
        ae=ae,
        env=env,
        history_length=history,
        output_size=action_dim,
        hidden_sizes=[400, 300],
    )
    exploration_policy = PolicyWrappedWithExplorationStrategy(
        exploration_strategy=es,
        policy=policy,
    )
    algorithm = FeatPointDDPG(
        ae,
        history,
        env=env,
        qf=qf,
        policy=policy,
        exploration_policy=exploration_policy,
        replay_buffer=replay_buffer,
        extra_fc_size=extra_fc_size,
        imsize=imsize,
        downsampled_size=downsampled_size,
        **variant['algo_params']
    )
    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
    # noinspection PyTypeChecker
    # Default hyperparameters; algo_params is passed through to FeatPointDDPG.
    variant = dict(
        algo_params=dict(
            num_epochs=400,
            num_steps_per_epoch=1000,
            num_steps_per_eval=500,
            batch_size=64,
            max_path_length=200,
            discount=.99,
            use_soft_update=True,
            tau=1e-2,
            qf_learning_rate=1e-3,
            policy_learning_rate=1e-4,
            save_replay_buffer=False,
        ),
        qf_criterion_class=nn.MSELoss,
        env_id='Reacher-v2'
    )
    # Sweep grid; each combination launches one run.
    # NOTE(review): experiment() builds SawyerXYZEnv directly and never reads
    # env_id or qf_criterion_class from the variant -- verify the sweep keys.
    search_space = {
        'env_id': [
            # 'Acrobot-v1',
            #'CartPole-v0',
            'Reacher-v2',
            #'InvertedPendulum-v1',
            # 'CartPole-v1',
            # 'MountainCar-v0',
        ],
        # 'algo_params.use_hard_updates': [True, False],
        'qf_criterion_class': [
            #nn.MSELoss,
            HuberLoss,
        ],
    }
    # setup_logger('dqn-images-experiment', variant=variant)
    # experiment(variant)
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
        # for i in range(2):
        run_experiment(
            experiment,
            variant=variant,
            exp_id=exp_id,
            exp_prefix="sawyer-spatial-FREEZE",
            mode='local',
            # use_gpu=False,
            # exp_prefix="double-vs-dqn-huber-sweep-cartpole",
            # mode='local',
            use_gpu=True,
        )
| 29.470968
| 75
| 0.611646
|
b1fb1146e425b6cd89f062575e2792fad50a1050
| 3,417
|
py
|
Python
|
src/cmds/netspace_funcs.py
|
13767849/chia-blockchain
|
ad7d7e0cced7f2f6deddc9e006dbaeee6dab8f66
|
[
"Apache-2.0"
] | null | null | null |
src/cmds/netspace_funcs.py
|
13767849/chia-blockchain
|
ad7d7e0cced7f2f6deddc9e006dbaeee6dab8f66
|
[
"Apache-2.0"
] | null | null | null |
src/cmds/netspace_funcs.py
|
13767849/chia-blockchain
|
ad7d7e0cced7f2f6deddc9e006dbaeee6dab8f66
|
[
"Apache-2.0"
] | null | null | null |
import aiohttp
from src.rpc.full_node_rpc_client import FullNodeRpcClient
from src.util.byte_types import hexstr_to_bytes
from src.util.config import load_config
from src.util.default_root import DEFAULT_ROOT_PATH
from src.util.ints import uint16
async def netstorge_async(rpc_port: int, delta_block_height: str, start: str) -> None:
    """
    Calculates the estimated space on the network given two block header hashes.

    Picks a newer block (the ``start`` header hash, or the current peak when
    ``start`` is empty), walks back ``delta_block_height`` blocks to find an
    older block, and asks the full node RPC for the netspace estimate between
    the two. All output is printed; nothing is returned.
    """
    client = None
    try:
        config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
        self_hostname = config["self_hostname"]
        if rpc_port is None:
            rpc_port = config["full_node"]["rpc_port"]
        client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
        if delta_block_height:
            # Resolve the newer endpoint of the interval.
            if start == "":
                blockchain_state = await client.get_blockchain_state()
                if blockchain_state["peak"] is None:
                    print("No blocks in blockchain")
                    return None
                newer_block_height = blockchain_state["peak"].height
            else:
                newer_block = await client.get_block_record(hexstr_to_bytes(start))
                if newer_block is None:
                    print("Block header hash", start, "not found.")
                    return None
                print("newer_height", newer_block.height)
                newer_block_height = newer_block.height
            newer_block_header = await client.get_block_record_by_height(newer_block_height)
            # Clamp at genesis so a large delta near the chain start still works.
            older_block_height = max(0, newer_block_height - int(delta_block_height))
            older_block_header = await client.get_block_record_by_height(older_block_height)
            network_space_bytes_estimate = await client.get_network_space(
                newer_block_header.header_hash, older_block_header.header_hash
            )
            print(
                "Older Block\n"
                f"Block Height: {older_block_header.height}\n"
                f"Weight: {older_block_header.weight}\n"
                f"VDF Iterations: {older_block_header.total_iters}\n"
                f"Header Hash: 0x{older_block_header.header_hash}\n"
            )
            print(
                "Newer Block\n"
                f"Block Height: {newer_block_header.height}\n"
                f"Weight: {newer_block_header.weight}\n"
                f"VDF Iterations: {newer_block_header.total_iters}\n"
                f"Header Hash: 0x{newer_block_header.header_hash}\n"
            )
            # 1024 ** 4 bytes per TiB, matching the printed TiB/PiB labels.
            network_space_terabytes_estimate = network_space_bytes_estimate / 1024 ** 4
            if network_space_terabytes_estimate > 1024:
                print(f"The network has an estimated {network_space_terabytes_estimate / 1024:.3f} PiB")
            else:
                print(f"The network has an estimated {network_space_terabytes_estimate:.3f} TiB")
    except Exception as e:
        if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
            print(f"Connection error. Check if full node rpc is running at {rpc_port}")
        else:
            print(f"Exception {e}")
    finally:
        # Close exactly once, and only if the client was actually created.
        # The original referenced ``client`` after the except block, which
        # raised NameError when e.g. config loading failed, and its early-
        # return paths closed the client twice.
        if client is not None:
            client.close()
            await client.await_closed()
| 44.960526
| 107
| 0.614282
|
990b0f51f2e563f1e3f053091a8c20837b22851d
| 21,234
|
py
|
Python
|
T5_shared.py
|
Verylovenlp/MinTL-SKKU
|
15b5cb870c7d6dcd0f5d895aac2806539cc5101f
|
[
"MIT"
] | 60
|
2020-09-24T06:17:49.000Z
|
2022-02-24T08:44:52.000Z
|
T5_shared.py
|
Verylovenlp/MinTL-SKKU
|
15b5cb870c7d6dcd0f5d895aac2806539cc5101f
|
[
"MIT"
] | 6
|
2020-11-11T02:04:23.000Z
|
2022-03-02T23:58:01.000Z
|
T5_shared.py
|
salesforce/CASPI
|
3e4cd23f4f3d1fa7132ba89805366472c9fe5983
|
[
"BSD-3-Clause"
] | 13
|
2020-09-28T07:29:05.000Z
|
2022-02-06T15:04:27.000Z
|
from transformers import (AdamW, T5Tokenizer, T5ForConditionalGeneration, WEIGHTS_NAME,CONFIG_NAME)
from copy import deepcopy
import torch
from torch.nn import CrossEntropyLoss
import time
class MiniT5(T5ForConditionalGeneration):
def __init__(self, config):
super().__init__(config)
#make a copy of decoder for dst
# decoder_config = deepcopy(config)
# decoder_config.is_decoder = True
# self.dst_decoder = type(self.decoder)(decoder_config, self.shared)
# self.dst_decoder.load_state_dict(self.decoder.state_dict())
# self.dst_lm_head = type(self.lm_head)(config.d_model, config.vocab_size, bias=False)
# self.dst_lm_head.load_state_dict(self.lm_head.state_dict())
def tie_decoder(self):
pass
# def forward(
# self,
# input_ids=None,
# attention_mask=None,
# encoder_outputs=None,
# decoder_input_ids=None,
# decoder_attention_mask=None,
# lm_labels=None,
# inputs_embeds=None,
# decoder_inputs_embeds=None,
# head_mask=None,
# ):
# # DST forward or Response generation forward?
# if decoder_input_ids[0,0] == self.config.decoder_start_token_id:
# decoder = self.dst_decoder
# lm_head = self.dst_lm_head
# else:
# decoder = self.decoder
# lm_head = self.lm_head
# # Encode if needed (training, first prediction pass)
# if encoder_outputs is None:
# # Convert encoder inputs in embeddings if needed
# encoder_outputs = self.encoder(
# input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask
# )
# hidden_states = encoder_outputs[0]
# if lm_labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# # get decoder inputs from shifting lm labels to the right
# decoder_input_ids = self._shift_right(lm_labels)
# # Decode
# decoder_outputs = decoder(
# input_ids=decoder_input_ids,
# attention_mask=decoder_attention_mask,
# inputs_embeds=decoder_inputs_embeds,
# encoder_hidden_states=hidden_states,
# encoder_attention_mask=attention_mask,
# head_mask=head_mask,
# )
# sequence_output = decoder_outputs[0]
# # Rescale output before projecting on vocab
# # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
# sequence_output = sequence_output * (self.model_dim ** -0.5)
# lm_logits = lm_head(sequence_output)
# decoder_outputs = (lm_logits,) #+ decoder_outputs[1:] # Add hidden states and attention if they are here
# if lm_labels is not None:
# loss_fct = CrossEntropyLoss(ignore_index=-100)
# loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1))
# decoder_outputs = (
# loss,
# ) + decoder_outputs # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
# return decoder_outputs + encoder_outputs
@torch.no_grad()
def generate(
    self,
    input_ids=None,
    max_length=None,
    min_length=None,
    do_sample=None,
    early_stopping=None,
    num_beams=None,
    temperature=None,
    top_k=None,
    top_p=None,
    repetition_penalty=None,
    bad_words_ids=None,
    bos_token_id=None,
    pad_token_id=None,
    eos_token_id=None,
    length_penalty=None,
    no_repeat_ngram_size=None,
    num_return_sequences=None,
    attention_mask=None,
    decoder_start_token_id=None,
):
    """Generate token sequences with greedy, sampling, or beam-search decoding.

    This mirrors the stock HuggingFace ``generate`` flow (validate arguments,
    build the attention mask, run the encoder once for encoder-decoder models,
    expand the batch for beams / multiple return sequences, then dispatch to
    ``_generate_beam_search`` or ``_generate_no_beam_search``) with one
    project-specific extension: ``decoder_start_token_id`` may be a *tensor*
    of per-example start tokens instead of a single int, which is used here to
    seed the decoder with a DB-state token per dialog.

    Every ``None`` argument falls back to the corresponding ``self.config``
    attribute. Returns a ``torch.LongTensor`` of generated token ids.

    Raises:
        AttributeError: if the model has no LM head to generate with.
        AssertionError: if any argument fails validation.
    """
    # We cannot generate if the model does not have a LM head
    if self.get_output_embeddings() is None:
        raise AttributeError(
            "You tried to generate sequences with a model that does not have a LM Head."
            "Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
        )
    # Resolve every unset argument from the model config so callers can
    # override only what they care about.
    max_length = max_length if max_length is not None else self.config.max_length
    min_length = min_length if min_length is not None else self.config.min_length
    do_sample = do_sample if do_sample is not None else self.config.do_sample
    early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
    num_beams = num_beams if num_beams is not None else self.config.num_beams
    temperature = temperature if temperature is not None else self.config.temperature
    top_k = top_k if top_k is not None else self.config.top_k
    top_p = top_p if top_p is not None else self.config.top_p
    repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
    bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
    pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
    eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
    length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
    no_repeat_ngram_size = (
        no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
    )
    bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
    num_return_sequences = (
        num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
    )
    decoder_start_token_id = (
        decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
    )
    if input_ids is not None:
        batch_size = input_ids.shape[0]  # overriden by the input batch_size
    else:
        batch_size = 1
    # Argument validation: fail fast with explicit messages before any
    # expensive encoder work is done.
    assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
    assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
    assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
    assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
    assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
    assert temperature > 0, "`temperature` should be strictly positive."
    assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
    assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
    assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
    assert input_ids is not None or (
        isinstance(bos_token_id, int) and bos_token_id >= 0
    ), "If input_ids is not defined, `bos_token_id` should be a positive integer."
    assert pad_token_id is None or (
        isinstance(pad_token_id, int) and (pad_token_id >= 0)
    ), "`pad_token_id` should be a positive integer."
    assert (eos_token_id is None) or (
        isinstance(eos_token_id, int) and (eos_token_id >= 0)
    ), "`eos_token_id` should be a positive integer."
    assert length_penalty > 0, "`length_penalty` should be strictly positive."
    assert (
        isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
    ), "`no_repeat_ngram_size` should be a positive integer."
    assert (
        isinstance(num_return_sequences, int) and num_return_sequences > 0
    ), "`num_return_sequences` should be a strictly positive integer."
    assert (
        bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
    ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
    if input_ids is None:
        # No prompt given: start generation from a single BOS token per batch row.
        assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
            "you should either supply a context to complete as `input_ids` input "
            "or a `bos_token_id` (integer >= 0) as a first token to start the generation."
        )
        input_ids = torch.full(
            (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,
        )
    else:
        assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
    # not allow to duplicate outputs when greedy decoding
    if do_sample is False:
        if num_beams == 1:
            # no_beam_search greedy generation conditions
            assert (
                num_return_sequences == 1
            ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
        else:
            # beam_search greedy generation conditions
            assert (
                num_beams >= num_return_sequences
            ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
    # create attention mask if necessary
    # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
    if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
        # Mask out padding positions so the encoder ignores them.
        attention_mask = input_ids.ne(pad_token_id).long()
    elif attention_mask is None:
        attention_mask = input_ids.new_ones(input_ids.shape)
    # set pad_token_id to eos_token_id if not set. Important that this is done after
    # attention_mask is created
    if pad_token_id is None and eos_token_id is not None:
        logger.warning(
            "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id)
        )
        pad_token_id = eos_token_id
    # current position and vocab size
    vocab_size = self.config.vocab_size
    # set effective batch size and effective batch multiplier according to do_sample
    # (sampling draws num_return_sequences independent continuations per input,
    # so the batch itself is widened; greedy/beam keeps the batch as-is).
    if do_sample:
        effective_batch_size = batch_size * num_return_sequences
        effective_batch_mult = num_return_sequences
    else:
        effective_batch_size = batch_size
        effective_batch_mult = 1
    if self.config.is_encoder_decoder:
        if decoder_start_token_id is None:
            decoder_start_token_id = bos_token_id
        assert (
            decoder_start_token_id is not None
        ), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
        assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
        assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
        # get encoder and store encoder outputs: the encoder runs exactly once,
        # its outputs are reused at every decoding step.
        encoder = self.get_encoder()
        encoder_outputs = encoder(input_ids, attention_mask=attention_mask)
    # Expand input ids if num_beams > 1 or num_return_sequences > 1
    if num_return_sequences > 1 or num_beams > 1:
        input_ids_len = input_ids.shape[-1]
        input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
        attention_mask = attention_mask.unsqueeze(1).expand(
            batch_size, effective_batch_mult * num_beams, input_ids_len
        )
        input_ids = input_ids.contiguous().view(
            effective_batch_size * num_beams, input_ids_len
        )  # shape: (batch_size * num_return_sequences * num_beams, cur_len)
        attention_mask = attention_mask.contiguous().view(
            effective_batch_size * num_beams, input_ids_len
        )  # shape: (batch_size * num_return_sequences * num_beams, cur_len)
    if self.config.is_encoder_decoder:
        # create empty decoder_input_ids
        if isinstance(decoder_start_token_id, int):
            input_ids = torch.full(
                (effective_batch_size * num_beams, 1),
                decoder_start_token_id,
                dtype=torch.long,
                device=next(self.parameters()).device,
            )
        else:
            # pass a batch of start tokens, but doesn't support beam search and sampling
            # NOTE(review): project extension — decoder_start_token_id is presumably a
            # (batch, 1) long tensor of per-example start tokens (the DB-state token);
            # confirm shape against the callers in inference()/inference_sequicity().
            input_ids=decoder_start_token_id
        cur_len = 1
        assert (
            batch_size == encoder_outputs[0].shape[0]
        ), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
        # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
        expanded_batch_idxs = (
            torch.arange(batch_size)
            .view(-1, 1)
            .repeat(1, num_beams * effective_batch_mult)
            .view(-1)
            .to(input_ids.device)
        )
        # expand encoder_outputs
        encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
    else:
        # Decoder-only model: no cached encoder state, decoding continues the prompt.
        encoder_outputs = None
        cur_len = input_ids.shape[-1]
    # Dispatch to the appropriate decoding loop.
    if num_beams > 1:
        output = self._generate_beam_search(
            input_ids,
            cur_len=cur_len,
            max_length=max_length,
            min_length=min_length,
            do_sample=do_sample,
            early_stopping=early_stopping,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            repetition_penalty=repetition_penalty,
            no_repeat_ngram_size=no_repeat_ngram_size,
            bad_words_ids=bad_words_ids,
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            decoder_start_token_id=decoder_start_token_id,
            eos_token_id=eos_token_id,
            batch_size=effective_batch_size,
            num_return_sequences=num_return_sequences,
            length_penalty=length_penalty,
            num_beams=num_beams,
            vocab_size=vocab_size,
            encoder_outputs=encoder_outputs,
            attention_mask=attention_mask,
        )
    else:
        output = self._generate_no_beam_search(
            input_ids,
            cur_len=cur_len,
            max_length=max_length,
            min_length=min_length,
            do_sample=do_sample,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            repetition_penalty=repetition_penalty,
            no_repeat_ngram_size=no_repeat_ngram_size,
            bad_words_ids=bad_words_ids,
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            decoder_start_token_id=decoder_start_token_id,
            eos_token_id=eos_token_id,
            batch_size=effective_batch_size,
            encoder_outputs=encoder_outputs,
            attention_mask=attention_mask,
        )
    return output
def inference(
    self,
    tokenizer,
    reader,
    prev,
    input_ids=None,
    attention_mask=None,
    turn_domain=None,
    db=None
):
    """Two-pass dialog inference with incremental belief-state tracking.

    Pass 1 decodes a belief-state *update* span (terminated by ``<eos_b>``),
    which is merged into the previous turn's belief state via
    ``reader.update_bspn``. A DB-state token is then derived per example and
    used as the decoder start token for pass 2, which decodes the response
    (terminated by ``<eos_r>``).

    Args:
        tokenizer: project tokenizer providing ``encode``/``decode``.
        reader: project data reader providing ``bspan_to_constraint_dict``,
            ``update_bspn`` and ``bspan_to_DBpointer``.
        prev: dict holding the previous turn's state; ``prev['bspn']`` is the
            per-example list of prior belief-span token ids (falsy on turn 1).
        input_ids / attention_mask: encoder inputs for the current turn.
        turn_domain: per-example active domain passed to the DB lookup.
        db: per-example gold booking pointer token ids — assumed indexable as
            ``db[i][0]``; TODO confirm exact shape against the caller.

    Returns:
        ``(dst_outputs, resp_outputs)`` — nested lists of token ids for the
        updated belief state and the response (DB-state token stripped).
    """
    #start = time.time()
    # Pass 1: decode the belief-state update, stopping at <eos_b>.
    dst_outputs = self.generate(input_ids=input_ids,
                                attention_mask=attention_mask,
                                eos_token_id=tokenizer.encode("<eos_b>")[0],
                                decoder_start_token_id=self.config.decoder_start_token_id,
                                max_length=200,
                                )
    #dst_time = time.time()-start
    #print(dst_time)
    dst_outputs = dst_outputs.tolist()
    #length = len(dst_outputs[0])
    #print(dst_outputs)
    # DST_UPDATE -> DST
    #check whether need to add eos
    #dst_outputs = [dst+tokenizer.encode("<eos_b>") for dst in dst_outputs]
    batch_size = input_ids.shape[0]
    # Parsed constraint updates per example (currently only computed; the
    # commented-out block below shows their originally intended use).
    constraint_dict_updates = [reader.bspan_to_constraint_dict(tokenizer.decode(dst_outputs[i])) for i in range(batch_size)]
    if prev['bspn']:
        # update the belief state: merge this turn's update into the
        # previous turn's full belief span. Must happen before the DB
        # lookup below, which reads the merged span.
        dst_outputs = [reader.update_bspn(prev_bspn=prev['bspn'][i], bspn_update=dst_outputs[i]) for i in range(batch_size)]
    # compute the DB state using the updated domain
    db_state = []
    for bi, bspn_list in enumerate(dst_outputs):
        # if not constraint_dict_updates[bi]:
        #     # if nothing to update
        #     db_state.append(tokenizer.encode("[db_state0]"))
        # else:
        #     turn_domain = 'general'
        #     for domain in constraint_dict_updates[bi].keys():
        #         #the last updated domain
        #         turn_domain=domain
        # follow damd for fair comparison
        db_vector = reader.bspan_to_DBpointer(tokenizer.decode(bspn_list), turn_domain[bi])
        if sum(db_vector)==0:
            # No DB match: fall back to the base [db_state0] token.
            db_state.append(tokenizer.encode("[db_state0]"))
        else:
            # Offset the base token id by the matched DB bucket (1-based).
            db_state.append([tokenizer.encode("[db_state0]")[0] + db_vector.index(1)+1])
        # use gold booking pointer, because we cannot issue BOOKING API
        if db[bi][0]>=tokenizer.encode("[db_state0+bookfail]")[0]:
            if db[bi][0]>=tokenizer.encode("[db_state0+booksuccess]")[0]:
                # booksuccess region of the DB-state vocabulary
                db_state[-1][0]+=10
            else:
                # bookfail region of the DB-state vocabulary
                db_state[-1][0]+=5
    db_state = torch.tensor(
        db_state,
        dtype=torch.long,
        device=next(self.parameters()).device,
    )
    # Pass 2: decode the response, seeding the decoder with the per-example
    # DB-state token (tensor decoder_start_token_id — see generate()).
    resp_outputs = self.generate(input_ids=input_ids,
                                 attention_mask=attention_mask,
                                 eos_token_id=tokenizer.encode("<eos_r>")[0],
                                 decoder_start_token_id=db_state,
                                 max_length=200,
                                 )
    resp_outputs = resp_outputs[:,1:].tolist() #skip DB state
    # print("DST:", tokenizer.decode(dst_outputs[0]))
    # print("RESP:", tokenizer.decode(resp_outputs[0]))
    return dst_outputs, resp_outputs#, dst_time, length
def inference_sequicity(
    self,
    tokenizer,
    reader,
    prev,
    input_ids=None,
    attention_mask=None,
    turn_domain=None,
    db=None
):
    """Sequicity-style two-pass inference (belief span decoded from scratch).

    Unlike ``inference``, the full belief span is regenerated each turn and
    ``prev`` is not consulted. Pass 1 decodes the belief span (``<eos_b>``),
    a DB-state token is derived per example, and pass 2 decodes the response
    (``<eos_r>``) seeded with that DB-state token.

    Returns ``(belief_token_ids, response_token_ids)`` as nested lists; the
    seeded DB-state token is stripped from each response.
    """
    # Pass 1: decode the complete belief span, stopping at <eos_b>.
    belief_outputs = self.generate(
        input_ids=input_ids,
        attention_mask=attention_mask,
        eos_token_id=tokenizer.encode("<eos_b>")[0],
        decoder_start_token_id=self.config.decoder_start_token_id,
        max_length=200,
    ).tolist()

    # Anchor ids of the DB-state vocabulary region (hoisted out of the loop;
    # tokenizer.encode is deterministic for fixed special tokens).
    base_state = tokenizer.encode("[db_state0]")
    bookfail_id = tokenizer.encode("[db_state0+bookfail]")[0]
    booksuccess_id = tokenizer.encode("[db_state0+booksuccess]")[0]

    # Map every decoded belief span to a single DB-state token id.
    db_tokens = []
    for idx, span_ids in enumerate(belief_outputs):
        pointer = reader.bspan_to_DBpointer(tokenizer.decode(span_ids), turn_domain[idx])
        if sum(pointer) == 0:
            # No DB match: base [db_state0] token.
            db_tokens.append(list(base_state))
        else:
            # Shift the base id by the matched DB bucket (1-based).
            db_tokens.append([base_state[0] + pointer.index(1) + 1])
        # Reuse the gold booking pointer — a real BOOKING API cannot be issued.
        if db[idx][0] >= bookfail_id:
            db_tokens[-1][0] += 10 if db[idx][0] >= booksuccess_id else 5

    db_state = torch.tensor(
        db_tokens,
        dtype=torch.long,
        device=next(self.parameters()).device,
    )

    # Pass 2: decode the response, seeding the decoder with the DB-state token.
    response = self.generate(
        input_ids=input_ids,
        attention_mask=attention_mask,
        eos_token_id=tokenizer.encode("<eos_r>")[0],
        decoder_start_token_id=db_state,
        max_length=200,
    )
    response_outputs = response[:, 1:].tolist()  # drop the seeded DB-state token
    return belief_outputs, response_outputs
| 45.468951
| 236
| 0.607658
|
1015ae15731d32ea6468eb098985d6db3acfabd6
| 10
|
py
|
Python
|
test_data/parse_retree/expected/character_set/complementing/suffix_dash/source.py
|
aas-core-works/aas-core-codegen
|
afec2cf363b6cb69816e7724a2b58626e2165869
|
[
"MIT"
] | 5
|
2021-12-29T12:55:34.000Z
|
2022-03-01T17:57:21.000Z
|
test_data/parse_retree/expected/character_set/complementing/suffix_dash/source.py
|
aas-core-works/aas-core-codegen
|
afec2cf363b6cb69816e7724a2b58626e2165869
|
[
"MIT"
] | 10
|
2021-12-29T02:15:55.000Z
|
2022-03-09T11:04:22.000Z
|
test_data/parse_retree/expected/character_set/complementing/suffix_dash/source.py
|
aas-core-works/aas-core-codegen
|
afec2cf363b6cb69816e7724a2b58626e2165869
|
[
"MIT"
] | 2
|
2021-12-29T01:42:12.000Z
|
2022-02-15T13:46:33.000Z
|
"[^a-z-]"
| 5
| 9
| 0.2
|
58dafc08268a1a80cb3439805d5aaaa73e99cf05
| 132
|
py
|
Python
|
config.py
|
delldu/SCNN
|
a6fadfbd9e593a28684dcaf29ca8ad425c1c700f
|
[
"MIT"
] | 1
|
2019-09-04T10:34:53.000Z
|
2019-09-04T10:34:53.000Z
|
config.py
|
delldu/SCNN
|
a6fadfbd9e593a28684dcaf29ca8ad425c1c700f
|
[
"MIT"
] | null | null | null |
config.py
|
delldu/SCNN
|
a6fadfbd9e593a28684dcaf29ca8ad425c1c700f
|
[
"MIT"
] | null | null | null |
# Root directories of the lane-detection datasets used by the project.
Dataset_Path = {
    "CULane_path": "/home/lion/Dataset/CULane/data/CULane",
    "Tusimple_path": "/home/lion/Dataset/tusimple",
}
| 26.4
| 58
| 0.712121
|
1fab4aa952e3c0cfd4c7853024159ef86fa37499
| 114,523
|
py
|
Python
|
use_case_WebScrapingTextData.py
|
lilulamili/STATY_dev_test
|
7353572ec626647a69694f82d483039b6e1f5c40
|
[
"Apache-2.0"
] | null | null | null |
use_case_WebScrapingTextData.py
|
lilulamili/STATY_dev_test
|
7353572ec626647a69694f82d483039b6e1f5c40
|
[
"Apache-2.0"
] | null | null | null |
use_case_WebScrapingTextData.py
|
lilulamili/STATY_dev_test
|
7353572ec626647a69694f82d483039b6e1f5c40
|
[
"Apache-2.0"
] | 2
|
2021-12-15T18:24:56.000Z
|
2021-12-15T19:07:41.000Z
|
####################
# IMPORT LIBRARIES #
####################
from selectors import BaseSelector
import streamlit as st
import pandas as pd
import numpy as np
import plotly as dd
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.font_manager
import plotly.graph_objects as go
import functions as fc
import modelling as ml
import os
import altair as alt
import bs4
import requests
from collections import Counter
import streamlit.components.v1 as components
import yfinance as yf
import datetime
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import sys
import platform
import re
import base64
import time
from io import BytesIO
from pysummarization.nlpbase.auto_abstractor import AutoAbstractor
from pysummarization.tokenizabledoc.simple_tokenizer import SimpleTokenizer
from pysummarization.web_scraping import WebScraping
from pysummarization.abstractabledoc.std_abstractor import StdAbstractor
from pysummarization.abstractabledoc.top_n_rank_abstractor import TopNRankAbstractor
from sklearn.feature_extraction.text import CountVectorizer
from difflib import SequenceMatcher
#----------------------------------------------------------------------------------------------
def app():
# Clear cache
#st.legacy_caching.clear_cache()
# Hide traceback in error messages (comment out for de-bugging)
#sys.tracebacklimit = 0
# workaround for Firefox bug- hide the scrollbar while keeping the scrolling functionality
st.markdown("""
<style>
.ReactVirtualized__Grid::-webkit-scrollbar {
display: none;
}
.ReactVirtualized__Grid {
-ms-overflow-style: none; /* IE and Edge */
scrollbar-width: none; /* Firefox */
}
</style>
""", unsafe_allow_html=True)
#-------------------------------------------------------------------------
# RESET INPUT
#Session state
if 'key' not in st.session_state:
st.session_state['key'] = 0
reset_clicked = st.sidebar.button("Reset all your input")
if reset_clicked:
st.session_state['key'] = st.session_state['key'] + 1
st.sidebar.markdown("")
#------------------------------------------------------------------------------------------
# SETTINGS
settings_expander=st.sidebar.expander('Settings')
with settings_expander:
st.caption("**Precision**")
user_precision=int(st.number_input('Number of digits after the decimal point',min_value=0,max_value=10,step=1,value=4, key = st.session_state['key']))
#st.caption("**Help**")
#sett_hints = st.checkbox('Show learning hints', value=False)
st.caption("**Appearance**")
sett_wide_mode = st.checkbox('Wide mode', value=False, key = st.session_state['key'])
sett_theme = st.selectbox('Theme', ["Light", "Dark"], key = st.session_state['key'])
#sett_info = st.checkbox('Show methods info', value=False)
#sett_prec = st.number_input('Set the number of diggits for the output', min_value=0, max_value=8, value=2)
st.sidebar.markdown("")
st.sidebar.markdown("")
# Check if wide mode
if sett_wide_mode:
fc.wide_mode_func()
# Check theme
if sett_theme == "Dark":
fc.theme_func_dark()
if sett_theme == "Light":
fc.theme_func_light()
fc.theme_func_dl_button()
def cc():
st.legacy_caching.clear_cache()
st.session_state['load_data_button'] = None
#++++++++++++++++++++++++++++++++++++++++++++
# Text Mining and web-scraping
#++++++++++++++++++++++++++++++++++++++++++++
basic_text="Let STATY do text/web processing for you and start exploring your data stories right below... "
st.header('**Web scraping and text data**')
tw_meth = ['Text analysis','Web-Page summary', 'Financial analysis']
tw_classifier = st.selectbox('What analysis would you like to perform?', list('-')+tw_meth, key = st.session_state['key'], on_change=cc)
if tw_classifier in tw_meth:
st.markdown("")
st.markdown("")
st.header('**'+tw_classifier+'**')
st.markdown(basic_text)
#------------------------------------------------------------
# Text summarization
# -----------------------------------------------------------
if tw_classifier=='Web-Page summary':
# Clear cache
st.legacy_caching.clear_cache()
user_path = st.text_input("What what web page should I summarize in five sentences for you?","https://en.wikipedia.org/wiki/Data_mining")
run_models = st.button("Press to start the data processing...")
if run_models:
# Pysummarization of a web page:
def pysumMain(url):
web_scrape = WebScraping()
# Web-scraping:
document = web_scrape.scrape(url)
auto_abstractor = AutoAbstractor()
auto_abstractor.tokenizable_doc = SimpleTokenizer()
# Set delimiter for a sentence:
auto_abstractor.delimiter_list = [".", "\n"]
abstractable_doc = TopNRankAbstractor()
# Summarize a document:
result_dict = auto_abstractor.summarize(document, abstractable_doc)
# Set the limit for the number of output sentences:
limit = 5
i = 1
for sentence in result_dict["summarize_result"]:
st.write(sentence)
if i >= limit:
break
i += 1
#user_path = st.text_input("What what web page should I summarize in five sentences for you?","https://en.wikipedia.org/wiki/Data_mining")
if user_path !='':
a1, a2 = st.columns(2)
with a1:
st.subheader('Web page preview:')
st.text("")
components.iframe(user_path,width=None,height=500,scrolling=True)
with a2:
st.subheader('Web page summary:')
st.text("")
pysumMain(user_path)
if tw_classifier =='Stock data analysis':
# Clear cache
st.legacy_caching.clear_cache()
# dwonload first the list of comanies in the S&P500 and DAX indices
payload=pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
first_table = payload[0]
df = first_table
symbols = df['Symbol'].values.tolist()
company = df['Security'].values.tolist()
sector = df['GICS Sector'].values.tolist()
#sectors = set(sectors)
payload1=pd.read_html('https://en.wikipedia.org/wiki/DAX')
DAXtable = payload1[3]
df=DAXtable
DAXsymbols = df['Ticker symbol'].values.tolist()
DAXSector = df['Prime Standard Sector'].values.tolist()
DAXcompany= df['Company'].values.tolist()
#Merge indices data
symbols_all=symbols+DAXsymbols
sector_all=sector+DAXSector
company_all=company+DAXcompany
#ticker specification
st.subheader('Stock data analysis')
a3, a4 = st.columns(2)
with a3:
selected_stock = st.text_input("Enter a stock ticker symbol", "TSLA")
symbols_all=list('-')+symbols_all
selected_symbol = st.selectbox('You can add an additional stock for comparision...',symbols_all)
with a4:
today = datetime.date.today()
last_year = today - datetime.timedelta(days=365)
start_date = st.date_input('Select start date', last_year)
end_date = st.date_input('Select end date', today)
if start_date > end_date:
st.error('ERROR: End date must fall after start date.')
st.markdown("")
add_data_show=st.checkbox("Get additional data (cashflow, balance sheet etc.)", value = False)
st.markdown("")
dev_expander_perf = st.expander("Stock performance")
with dev_expander_perf:
#get data for a selected ticker symbol:
stock_data = yf.Ticker(selected_stock)
stock_df = stock_data.history(period='1d', start=start_date, end=end_date)
add_stock_data = yf.Ticker(selected_symbol)
add_stock_df = add_stock_data.history(period='1d', start=start_date, end=end_date)
#print stock values
if st.checkbox("Show stock data for " + selected_stock, value = True):
st.write(stock_df)
if selected_symbol !="-":
if st.checkbox("Show stock data for " + selected_symbol, value = False):
st.write(add_stock_df)
comparision_check=st.checkbox('Compare '+ selected_stock + " & " + selected_symbol, value = True)
#draw line chart with stock prices
a5, a6 = st.columns(2)
with a5:
stock_para= st.selectbox('Select ' + selected_stock + " info to draw", stock_df.columns)
if selected_symbol !="-":
if comparision_check:
st.subheader('Daily data comparision '+ selected_stock + " & " + selected_symbol)
c1=selected_stock + " " + stock_para
c2=selected_symbol + " " + stock_para
c1_data=stock_df[[stock_para]]
c1_data.rename(columns={c1_data.columns[0]: c1 }, inplace = True)
c2_data=add_stock_df[[stock_para]]
c2_data.rename(columns={c2_data.columns[0]: c2 }, inplace = True)
stock_dataToplot=pd.concat([c1_data, c2_data], axis=1)
#st.write(stock_dataToplot)
st.line_chart(stock_dataToplot)
else:
st.subheader(stock_para + " price for " + selected_stock + " (daily)")
stock_dataToplot=stock_df[stock_para]
st.line_chart(stock_dataToplot)
else:
st.subheader(stock_para + " price for " + selected_stock + " (daily)")
stock_dataToplot=stock_df[stock_para]
st.line_chart(stock_dataToplot)
with a6:
stock_para2= st.selectbox('Select ' + selected_stock + " info to draw", stock_df.columns, index=3)
if selected_symbol !="-":
if comparision_check:
st.subheader('Daily data comparision '+ selected_stock + " & " + selected_symbol)
c3=selected_stock + " " + stock_para2
c4=selected_symbol + " " + stock_para2
c3_data=stock_df[[stock_para2]]
c3_data.rename(columns={c3_data.columns[0]: c3 }, inplace = True)
c4_data=add_stock_df[[stock_para2]]
c4_data.rename(columns={c4_data.columns[0]: c4 }, inplace = True)
stock_dataToplot2=pd.concat([c3_data, c4_data], axis=1)
#st.write(stock_dataToplot)
st.line_chart(stock_dataToplot2)
else:
st.subheader(stock_para2 + " price for " + selected_stock + " (daily)")
stock_dataToplot2=stock_df[stock_para2]
st.line_chart(stock_dataToplot2)
else:
st.subheader(stock_para2 + " price for " + selected_stock + " (daily)")
stock_dataToplot2=stock_df[stock_para2]
st.line_chart(stock_dataToplot2)
if add_data_show:
dev_expander_cf = st.expander("Cashflow")
with dev_expander_cf:
st.subheader(selected_stock)
stock_data_cf = yf.Ticker(selected_stock).cashflow
st.write(stock_data_cf)
if selected_symbol !='-':
st.subheader(selected_symbol)
st.write(yf.Ticker(selected_symbol).cashflow)
dev_expander_bs = st.expander("Balance sheet")
with dev_expander_bs:
st.subheader(selected_stock)
stock_data = yf.Ticker(selected_stock)
stock_data_fi = stock_data.balance_sheet
st.write(stock_data_fi)
if selected_symbol !='-':
st.subheader(selected_symbol)
st.write(yf.Ticker(selected_symbol).balance_sheet)
dev_expander_fi = st.expander("Other financials")
with dev_expander_fi:
st.subheader(selected_stock)
stock_data = yf.Ticker(selected_stock)
stock_data_fi = stock_data.financials
st.write(stock_data_fi)
if selected_symbol !='-':
st.subheader(selected_symbol)
st.write(yf.Ticker(selected_symbol).financials)
dev_expander_info = st.expander("Stock basic info")
with dev_expander_info:
st.subheader(selected_stock)
stock_data = yf.Ticker(selected_stock)
st.write(stock_data.info ['longBusinessSummary'])
if selected_symbol !='-':
st.subheader(selected_symbol)
st.write(yf.Ticker(selected_symbol).info ['longBusinessSummary'])
# ----------------------------------------------------------------
# Text Mining
#-----------------------------------------------------------------
if tw_classifier=='Text analysis':
# Clear cache
st.legacy_caching.clear_cache()
run_text_OK=False
text_cv = CountVectorizer()
user_color=21
def random_color_func(user_col,word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None):
h = int(user_color)
s = int(100.0 * 255.0 / 255.0)
l = int(100.0 * float(random_state.randint(60, 120)) / 255.0)
return "hsl({}, {}%, {}%)".format(h, s, l)
#specify the data source
word_sl=st.radio('Select the data source for text analysis',['text input','web page'])
if word_sl=='text input':
user_text=st.text_area('Please enter or copy your text here', value='STATY \n\n STATY is growing out of the effort to bring more data insights to university education across all disciplines of the natural and social sciences. It is motivated by the belief that fostering data literacy, creativity and critical thinking are more effective towards innovation, than bringing endless units of introduction to programming to students who find learning programming an overwhelming task. By providing easy access to the methods of classical statistics and machine learning, STATY’s approach is to inspire students to explore issues they are studying in the curriculum directly on real data, practice interpreting the results and check the source code to see how it is done or to improve the code. STATY can be used in the process of teaching and learning data science, demonstrations of theoretical concepts across various disciplines, active learning, promotion of teamwork, research and beyond.', height=600, key = st.session_state['key'] )
st.write("")
if len(user_text)>0:
run_text_OK = True
elif word_sl=='web page':
user_path_wp = st.text_input("What web page should I analyse?","https://en.wikipedia.org/wiki/Data_mining", key = st.session_state['key'])
st.write("")
if user_path_wp !='':
web_scrape = WebScraping()
user_text = web_scrape.scrape(user_path_wp)
run_text_OK = True
if run_text_OK == True:
# Basic text processing:
text_cv_fit=text_cv.fit_transform([user_text])
wordcount= pd.DataFrame(text_cv_fit.toarray().sum(axis=0), index=text_cv.get_feature_names(),columns=["Word count"])
word_sorted=wordcount.sort_values(by=["Word count"], ascending=False)
    # --- Stop-word handling: choose how the stop-word list is assembled -----
    stopword_selection=st.selectbox("Select stop word option",["No stop words (use all words)","Manually select stop words", "Use a built-in list of stop words in German", "Use a built-in list of stop words in English", "Specify stop words"], index=3, key=st.session_state['key'])
    if stopword_selection=="No stop words (use all words)":
        word_stopwords=[]
    elif stopword_selection=="Manually select stop words":
        # Pre-select the 2nd..10th most frequent words as likely stop words
        # (index 0, the single most frequent word, is deliberately excluded).
        word_stopwords=st.multiselect("Select stop words (words to be removed from the text)", word_sorted.index.tolist(),word_sorted.index[1:min(10,len(word_sorted.index))].tolist(), key = st.session_state['key'])
elif stopword_selection=="Use a built-in list of stop words in German":
word_stopwords=["a","ab","aber","abermaliges","abermals","abgerufen","abgerufene","abgerufener","abgerufenes","abgesehen","ach","acht","achte","achten","achter","achtes","aehnlich","aehnliche","aehnlichem","aehnlichen","aehnlicher","aehnliches","aehnlichste","aehnlichstem","aehnlichsten","aehnlichster","aehnlichstes","aeusserst","aeusserste","aeusserstem","aeussersten","aeusserster","aeusserstes","ag","ähnlich","ähnliche","ähnlichem","ähnlichen","ähnlicher","ähnliches","ähnlichst","ähnlichste","ähnlichstem","ähnlichsten","ähnlichster","ähnlichstes","alle","allein","alleine","allem","allemal","allen","allenfalls","allenthalben","aller","allerdings","allerlei","alles","allesamt","allg","allg.","allgemein","allgemeine","allgemeinem","allgemeinen","allgemeiner","allgemeines","allgemeinste","allgemeinstem","allgemeinsten","allgemeinster","allgemeinstes","allmählich","allzeit","allzu","als","alsbald","also","am","an","and","andauernd","andauernde","andauerndem","andauernden","andauernder","andauerndes","ander","andere","anderem","anderen","anderenfalls","anderer","andererseits","anderes","anderm","andern","andernfalls","anderr","anders","anderst","anderweitig","anderweitige","anderweitigem","anderweitigen","anderweitiger","anderweitiges","anerkannt","anerkannte","anerkannter","anerkanntes","anfangen","anfing","angefangen","angesetze","angesetzt","angesetzten","angesetzter","ans","anscheinend","ansetzen","ansonst","ansonsten","anstatt","anstelle","arbeiten","au","auch","auf","aufgehört","aufgrund","aufhören","aufhörte","aufzusuchen","augenscheinlich","augenscheinliche","augenscheinlichem","augenscheinlichen","augenscheinlicher","augenscheinliches","augenscheinlichst","augenscheinlichste","augenscheinlichstem","augenscheinlichsten","augenscheinlichster","augenscheinlichstes","aus","ausdrücken","ausdrücklich","ausdrückliche","ausdrücklichem","ausdrücklichen","ausdrücklicher","ausdrückliches","ausdrückt","ausdrückte","ausgenommen","ausgenommene","ausgenommenem","ausgenommenen
","ausgenommener","ausgenommenes","ausgerechnet","ausgerechnete","ausgerechnetem","ausgerechneten","ausgerechneter","ausgerechnetes","ausnahmslos","ausnahmslose","ausnahmslosem","ausnahmslosen","ausnahmsloser","ausnahmsloses","außen","außer","ausser","außerdem","ausserdem","außerhalb","äusserst","äusserste","äusserstem","äussersten","äusserster","äusserstes","author","autor","b","baelde","bald","bälde","bearbeite","bearbeiten","bearbeitete","bearbeiteten","bedarf","bedürfen","bedurfte","been","befahl","befiehlt","befiehlte","befohlene","befohlens","befragen","befragte","befragten","befragter","begann","beginnen","begonnen","behalten","behielt","bei","beide","beidem","beiden","beider","beiderlei","beides","beim","beinahe","beisammen","beispiel","beispielsweise","beitragen","beitrugen","bekannt","bekannte","bekannter","bekanntlich","bekanntliche","bekanntlichem","bekanntlichen","bekanntlicher","bekanntliches","bekennen","benutzt","bereits","berichten","berichtet","berichtete","berichteten","besonders","besser","bessere","besserem","besseren","besserer","besseres","bestehen","besteht","besten","bestenfalls","bestimmt","bestimmte","bestimmtem","bestimmten","bestimmter","bestimmtes","beträchtlich","beträchtliche","beträchtlichem","beträchtlichen","beträchtlicher","beträchtliches","betraechtlich","betraechtliche","betraechtlichem","betraechtlichen","betraechtlicher","betraechtliches","betreffend","betreffende","betreffendem","betreffenden","betreffender","betreffendes","bevor","bez","bez.","bezgl","bezgl.","bezueglich","bezüglich","bietet","bin","bis","bisher","bisherige","bisherigem","bisherigen","bisheriger","bisheriges","bislang","bisschen","bist","bitte","bleiben","bleibt","blieb","bloss","böden","boeden","brachte","brachten","brauchen","braucht","bräuchte","bringen","bsp","bsp.","bspw","bspw.","bzw","bzw.","c","ca","ca.","circa","d","d.h","da","dabei","dadurch","dafuer","dafür","dagegen","daher","dahin","dahingehend","dahingehende","dahingehendem","dahingehenden","da
hingehender","dahingehendes","dahinter","damalige","damaligem","damaligen","damaliger","damaliges","damals","damit","danach","daneben","dank","danke","danken","dann","dannen","daran","darauf","daraus","darf","darfst","darin","darüber","darüberhinaus","darueber","darueberhinaus","darum","darunter","das","dasein","daselbst","daß","dass","dasselbe","Dat","davon","davor","dazu","dazwischen","dein","deine","deinem","deinen","deiner","deines","dem","dementsprechend","demgegenüber","demgegenueber","demgemaess","demgemäß","demgemäss","demnach","demselben","demzufolge","den","denen","denkbar","denkbare","denkbarem","denkbaren","denkbarer","denkbares","denn","dennoch","denselben","der","derart","derartig","derartige","derartigem","derartigen","derartiger","derem","deren","derer","derjenige","derjenigen","dermaßen","dermassen","derselbe","derselben","derzeit","derzeitig","derzeitige","derzeitigem","derzeitigen","derzeitiges","des","deshalb","desselben","dessen","dessenungeachtet","desto","desungeachtet","deswegen","dich","die","diejenige","diejenigen","dies","diese","dieselbe","dieselben","diesem","diesen","dieser","dieses","diesseitig","diesseitige","diesseitigem","diesseitigen","diesseitiger","diesseitiges","diesseits","dinge","dir","direkt","direkte","direkten","direkter","doch","doppelt","dort","dorther","dorthin","dran","drauf","drei","dreißig","drin","dritte","dritten","dritter","drittes","drüber","drueber","drum","drunter","du","duerfte","duerften","duerftest","duerftet","dunklen","durch","durchaus","durchweg","durchwegs","dürfen","dürft","durfte","dürfte","durften","dürften","durftest","dürftest","durftet","dürftet","e","eben","ebenfalls","ebenso","ect","ect.","ehe","eher","eheste","ehestem","ehesten","ehester","ehestes","ehrlich","ei","ei,","eigen","eigene","eigenem","eigenen","eigener","eigenes","eigenst","eigentlich","eigentliche","eigentlichem","eigentlichen","eigentlicher","eigentliches","ein","einander","einbaün","eine","einem","einen","einer","einerlei","einerse
its","eines","einfach","einführen","einführte","einführten","eingesetzt","einig","einige","einigem","einigen","einiger","einigermaßen","einiges","einmal","einmalig","einmalige","einmaligem","einmaligen","einmaliger","einmaliges","eins","einseitig","einseitige","einseitigen","einseitiger","einst","einstmals","einzig","elf","empfunden","en","ende","endlich","entgegen","entlang","entsprechend","entsprechende","entsprechendem","entsprechenden","entsprechender","entsprechendes","entweder","er","ergänze","ergänzen","ergänzte","ergänzten","ergo","erhält","erhalten","erhielt","erhielten","erneut","ernst","eröffne","eröffnen","eröffnet","eröffnete","eröffnetes","erscheinen","erst","erste","erstem","ersten","erster","erstere","ersterem","ersteren","ersterer","ersteres","erstes","es","etc","etc.","etliche","etlichem","etlichen","etlicher","etliches","etwa","etwaige","etwas","euch","euer","eure","eurem","euren","eurer","eures","euretwegen","f","fall","falls","fand","fast","ferner","fertig","finde","finden","findest","findet","folgend","folgende","folgendem","folgenden","folgender","folgendermassen","folgendes","folglich","for","fordern","fordert","forderte","forderten","fort","fortsetzen","fortsetzt","fortsetzte","fortsetzten","fragte","frau","frei","freie","freier","freies","früher","fuer","fuers","fünf","fünfte","fünften","fünfter","fünftes","für","fürs","g","gab","gaenzlich","gaenzliche","gaenzlichem","gaenzlichen","gaenzlicher","gaenzliches","gängig","gängige","gängigen","gängiger","gängiges","ganz","ganze","ganzem","ganzen","ganzer","ganzes","gänzlich","gänzliche","gänzlichem","gänzlichen","gänzlicher","gänzliches","gar","gbr","geb","geben","geblieben","gebracht","gedurft","geehrt","geehrte","geehrten","geehrter","gefallen","gefälligst","gefällt","gefiel","gegeben","gegen","gegenüber","gegenueber","gehabt","gehalten","gehen","geht","gekannt","gekommen","gekonnt","gemacht","gemaess","gemäss","gemeinhin","gemocht","gemusst","genau","genommen","genug","gepriesener","gepriesen
es","gerade","gern","gesagt","geschweige","gesehen","gestern","gestrige","getan","geteilt","geteilte","getragen","getrennt","gewesen","gewiss","gewisse","gewissem","gewissen","gewisser","gewissermaßen","gewisses","gewollt","geworden","ggf","ggf.","gib","gibt","gilt","ging","gleich","gleiche","gleichem","gleichen","gleicher","gleiches","gleichsam","gleichste","gleichstem","gleichsten","gleichster","gleichstes","gleichwohl","gleichzeitig","gleichzeitige","gleichzeitigem","gleichzeitigen","gleichzeitiger","gleichzeitiges","glücklicherweise","gluecklicherweise","gmbh","gott","gottseidank","gratulieren","gratuliert","gratulierte","groesstenteils","groß","gross","große","grosse","großen","grossen","großer","grosser","großes","grosses","grösstenteils","gruendlich","gründlich","gut","gute","guten","guter","gutes","h","hab","habe","haben","habt","haette","haeufig","haeufige","haeufigem","haeufigen","haeufiger","haeufigere","haeufigeren","haeufigerer","haeufigeres","halb","hallo","halten","hast","hat","hätt","hatte","hätte","hatten","hätten","hattest","hattet","häufig","häufige","häufigem","häufigen","häufiger","häufigere","häufigeren","häufigerer","häufigeres","heisst","hen","her","heraus","herein","herum","heute","heutige","heutigem","heutigen","heutiger","heutiges","hier","hierbei","hiermit","hiesige","hiesigem","hiesigen","hiesiger","hiesiges","hin","hindurch","hinein","hingegen","hinlanglich","hinlänglich","hinten","hintendran","hinter","hinterher","hinterm","hintern","hinunter","hoch","höchst","höchstens","http","hundert","i","ich","igitt","ihm","ihn","ihnen","ihr","ihre","ihrem","ihren","ihrer","ihres","ihretwegen","ihrige","ihrigen",
"ihriges","im","immer","immerhin","immerwaehrend","immerwaehrende","immerwaehrendem","immerwaehrenden","immerwaehrender","immerwaehrendes","immerwährend","immerwährende","immerwährendem","immerwährenden","immerwährender","immerwährendes","immerzu","important","in","indem","indessen","Inf.","info","infolge","infolgedessen","information","innen","innerhalb","innerlich","ins","insbesondere","insgeheim","insgeheime","insgeheimer","insgesamt","insgesamte","insgesamter","insofern","inzwischen","irgend","irgendein","irgendeine","irgendeinem","irgendeiner","irgendeines","irgendetwas","irgendjemand","irgendjemandem","irgendwann","irgendwas","irgendwelche","irgendwen","irgendwenn","irgendwer","irgendwie","irgendwo","irgendwohin","ist","j","ja","jaehrig","jaehrige","jaehrigem","jaehrigen","jaehriger","jaehriges","jahr","jahre","jahren","jährig","jährige","jährigem","jährigen","jähriges","je","jede","jedem","jeden","jedenfalls","jeder","jederlei","jedermann","jedermanns","jedes","jedesmal","jedoch","jeglichem","jeglichen","jeglicher","jegliches","jemals","jemand","jemandem","jemanden","jemandes","jene","jenem","jenen","jener","jenes","jenseitig","jenseitigem","jenseitiger","jenseits","jetzt","jung","junge","jungem","jungen","junger","junges","k","kaeumlich","kam","kann","kannst","kaum","käumlich","kein","keine","keinem","keinen","keiner","keinerlei","keines","keineswegs","klar","klare","klaren","klares","klein","kleine","kleinen","kleiner","kleines","koennen","koennt","koennte","koennten","koenntest","koenntet","komme","kommen","kommt","konkret","konkrete","konkreten","konkreter","konkretes","könn","können","könnt","konnte","könnte","konnten","könnten","konntest","könntest","konntet","könntet","kuenftig","kuerzlich","kuerzlichst","künftig","kurz","kürzlich","kürzlichst","l","laengst","lag","lagen","lang","lange","langsam","längst","längstens","lassen","laut","lediglich","leer","legen","legte","legten","leicht","leide","leider","lesen","letze","letzte","letzten","letztendlich","
letztens","letztere","letzterem","letzterer","letzteres","letztes","letztlich","lichten","lieber","liegt","liest","links","los","m","mache","machen","machst","macht","machte","machten","mag","magst","mahn","mal","man","manch","manche","manchem","manchen","mancher","mancherlei","mancherorts","manches","manchmal","mann","margin","massgebend","massgebende","massgebendem","massgebenden","massgebender","massgebendes","massgeblich","massgebliche","massgeblichem","massgeblichen","massgeblicher","mehr","mehrere","mehrerer","mehrfach","mehrmalig","mehrmaligem","mehrmaliger","mehrmaliges","mein","meine","meinem","meinen","meiner","meines","meinetwegen","meins","meist","meiste","meisten","meistens","meistenteils","mensch","menschen","meta","mich","mindestens","mir","mit","miteinander","mitgleich","mithin","mitnichten","mittel","mittels","mittelst","mitten","mittig","mitunter","mitwohl","mochte","möchte","mochten","möchten","möchtest","moechte","moeglich","moeglichst","moeglichste","moeglichstem","moeglichsten","moeglichster","mögen","möglich","mögliche","möglichen","möglicher","möglicherweise","möglichst","möglichste","möglichstem","möglichsten","möglichster","mögt","morgen","morgige","muessen","muesst","muesste","muß","muss","müssen","mußt","musst","müßt","müsst","musste","müsste","mussten","müssten","n","na","nach","nachdem","nacher","nachher","nachhinein","nächste","nacht","naechste","naemlich","nahm","nämlich","naturgemaess","naturgemäss","natürlich","ncht","neben","nebenan","nehmen","nein","neu","neue","neuem","neuen","neuer","neuerdings","neuerlich","neuerliche","neuerlichem","neuerlicher","neuerliches","neues","neulich","neun","neunte","neunten","neunter","neuntes","nicht","nichts","nichtsdestotrotz","nichtsdestoweniger","nie","niemals","niemand","niemandem","niemanden","niemandes","nimm","nimmer","nimmt","nirgends","nirgendwo","noch","noetigenfalls","nötigenfalls","nun","nur","nutzen","nutzt","nützt","nutzung","o","ob","oben","ober","oberen","oberer","oberhalb","oberst
e","obersten","oberster","obgleich","obs","obschon","obwohl","oder","oefter","oefters","off","offen","offenkundig","offenkundige","offenkundigem","offenkundigen","offenkundiger","offenkundiges","offensichtlich","offensichtliche","offensichtlichem","offensichtlichen","offensichtlicher","offensichtliches","oft","öfter","öfters","oftmals","ohne","ohnedies","online","ordnung","p","paar","partout","per","persoenlich","persoenliche","persoenlichem","persoenlicher","persoenliches","persönlich","persönliche","persönlicher","persönliches","pfui","ploetzlich","ploetzliche","ploetzlichem","ploetzlicher","ploetzliches","plötzlich","plötzliche","plötzlichem","plötzlicher","plötzliches","pro","q","quasi","r","reagiere","reagieren","reagiert","reagierte","recht","rechte","rechten","rechter","rechtes","rechts","regelmäßig","reichlich","reichliche","reichlichem","reichlichen","reichlicher","restlos","restlose","restlosem","restlosen","restloser","restloses","richtig","richtiggehend","richtiggehende","richtiggehendem","richtiggehenden","richtiggehender","richtiggehendes","rief","rund","rundheraus","rundum","runter","s","sa","sache","sage","sagen","sagt","sagte","sagten","sagtest","sagtet","sah","samt","sämtliche","sang","sangen","satt","sattsam","schätzen","schätzt","schätzte","schätzten","scheinbar","scheinen","schlecht","schlechter","schlicht","schlichtweg","schließlich","schluss","schlussendlich","schnell","schon","schreibe","schreiben","schreibens","schreiber","schwerlich","schwerliche","schwerlichem","schwerlichen","schwerlicher","schwerliches","schwierig","sechs","sechste","sechsten","sechster","sechstes","sect","sehe","sehen","sehr","sehrwohl","seht","sei","seid","seien","seiest","seiet","sein","seine","seinem","seinen","seiner","seines","seit","seitdem","seite","seiten","seither","selbe","selben","selber","selbst","selbstredend","selbstredende","selbstredendem","selbstredenden","selbstredender","selbstredendes","seltsamerweise","senke","senken","senkt","senkte","senkten","set
zen","setzt","setzte","setzten","sich","sicher","sicherlich","sie","sieben","siebente","siebenten","siebenter","siebentes","siebte","siehe","sieht","sind","singen","singt","so","sobald","sodaß","soeben","sofern","sofort","sog","sogar","sogleich","solang","solange","solc","solchen","solch","solche","solchem","solchen","solcher","solches","soll","sollen","sollst","sollt","sollte","sollten","solltest","solltet","somit","sondern","sonst","sonstig","sonstige","sonstigem","sonstiger","sonstwo","sooft","soviel","soweit","sowie","sowieso","sowohl","später","spielen","startet","startete","starteten","startseite","statt","stattdessen","steht","steige","steigen","steigt","stellenweise","stellenweisem","stellenweisen","stets","stieg","stiegen","such","suche","suchen","t","tag","tage","tagen","tages","tat","tät","tatsächlich","tatsächlichen","tatsächlicher","tatsächliches","tatsaechlich","tatsaechlichen","tatsaechlicher","tatsaechliches","tausend","teil","teile","teilen","teilte","teilten","tel","tief","titel","toll","total","trage","tragen","trägt","tritt","trotzdem","trug","tun","tust","tut","txt","u","übel","über","überall","überallhin","überaus","überdies","überhaupt","überll","übermorgen","üblicherweise","übrig","übrigens","ueber","ueberall","ueberallhin","ueberaus","ueberdies","ueberhaupt","uebermorgen","ueblicherweise","uebrig","uebrigens","uhr","um","ums","umso","umstaendehalber","umständehalber","unbedingt","unbedingte","unbedingter","unbedingtes","und","unerhoert","unerhoerte","unerhoertem","unerhoerten","unerhoerter","unerhoertes","unerhört","unerhörte","unerhörtem","unerhörten","unerhörter","unerhörtes","ungefähr","ungemein","ungewoehnlich","ungewoehnliche","ungewoehnlichem","ungewoehnlichen","ungewoehnlicher","ungewoehnliches","ungewöhnlich","ungewöhnliche","ungewöhnlichem","ungewöhnlichen","ungewöhnlicher","ungewöhnliches","ungleich","ungleiche","ungleichem","ungleichen","ungleicher","ungleiches","unmassgeblich","unmassgebliche","unmassgeblichem","unmassgeblichen",
"unmassgeblicher","unmassgebliches","unmoeglich","unmoegliche","unmoeglichem","unmoeglichen","unmoeglicher","unmoegliches","unmöglich","unmögliche","unmöglichen","unmöglicher","unnötig","uns","unsaeglich","unsaegliche","unsaeglichem","unsaeglichen","unsaeglicher","unsaegliches","unsagbar","unsagbare","unsagbarem","unsagbaren","unsagbarer","unsagbares","unsäglich","unsägliche","unsäglichem","unsäglichen","unsäglicher","unsägliches","unse","unsem","unsen","unser","unsere","unserem","unseren","unserer","unseres","unserm","unses","unsre","unsrem","unsren","unsrer","unsres","unstreitig","unstreitige","unstreitigem","unstreitigen","unstreitiger","unstreitiges","unten","unter","unterbrach","unterbrechen","untere","unterem","unteres","unterhalb","unterste","unterster","unterstes","unwichtig","unzweifelhaft","unzweifelhafte","unzweifelhaftem","unzweifelhaften","unzweifelhafter","unzweifelhaftes","usw","usw.","v","vergangen","vergangene","vergangenen","vergangener","vergangenes","vermag","vermögen","vermutlich","vermutliche","vermutlichem","vermutlichen","vermutlicher","vermutliches","veröffentlichen","veröffentlicher","veröffentlicht","veröffentlichte","veröffentlichten","veröffentlichtes","verrate","verraten","verriet","verrieten","version","versorge","versorgen","versorgt","versorgte","versorgten","versorgtes","viel","viele","vielem","vielen","vieler","vielerlei","vieles","vielleicht","vielmalig","vielmals","vier","vierte","vierten","vierter","viertes","voellig","voellige","voelligem","voelligen","voelliger","voelliges","voelligst","vollends","völlig","völlige","völligem","völligen","völliger","völliges","völligst","vollstaendig","vollstaendige","vollstaendigem","vollstaendigen","vollstaendiger","vollstaendiges","vollständig","vollständige","vollständigem","vollständigen","vollständiger","vollständiges","vom","von","vor","voran","vorbei","vorgestern","vorher","vorherig","vorherige","vorherigem","vorheriger",
"vorne","vorüber","vorueber","w","wachen","waehrend","waehrenddessen","waere","während","währenddem","währenddessen","wann","war","wär","wäre","waren","wären","warst","wart","warum","was","weder","weg","wegen","weil","weiß","weit","weiter","weitere","weiterem","weiteren","weiterer","weiteres","weiterhin","weitestgehend","weitestgehende","weitestgehendem","weitestgehenden","weitestgehender","weitestgehendes","weitgehend","weitgehende","weitgehendem","weitgehenden","weitgehender","weitgehendes","welche","welchem","welchen","welcher","welches","wem","wen","wenig","wenige","weniger","weniges","wenigstens","wenn","wenngleich","wer","werde","werden","werdet","weshalb","wessen","weswegen","wichtig","wie","wieder","wiederum","wieso","wieviel","wieviele","wievieler","wiewohl","will","willst","wir","wird","wirklich","wirklichem","wirklicher","wirkliches","wirst","wissen","wo","wobei","wodurch","wofuer","wofür","wogegen","woher","wohin","wohingegen","wohl","wohlgemerkt","wohlweislich","wolle","wollen","wollt","wollte","wollten","wolltest","wolltet","womit","womoeglich","womoegliche","womoeglichem","womoeglichen","womoeglicher","womoegliches","womöglich","womögliche","womöglichem","womöglichen","womöglicher","womögliches","woran","woraufhin","woraus","worden","worin","wuerde","wuerden","wuerdest","wuerdet","wurde","würde","wurden","würden","wurdest","würdest","wurdet","würdet","www","x","y","z","z.b","z.B.","zahlreich","zahlreichem","zahlreicher","zB","zb.","zehn","zehnte","zehnten","zehnter","zehntes","zeit","zeitweise","zeitweisem","zeitweisen","zeitweiser","ziehen","zieht","ziemlich","ziemliche","ziemlichem","ziemlichen","ziemlicher","ziemliches","zirka","zog","zogen","zu","zudem","zuerst","zufolge","zugleich","zuletzt","zum","zumal","zumeist","zumindest","zunächst","zunaechst","zur","zurück","zurueck","zusammen","zusehends","zuviel","zuviele","zuvieler","zuweilen","zwanzig","zwar","zwei","zweifelsfrei","zweifelsfreie","zweifelsfreiem","zweifelsfreien","zweifelsfreier","zwei
felsfreies","zweite","zweiten","zweiter","zweites","zwischen","zwölf"]
elif stopword_selection=="Use a built-in list of stop words in English":
word_stopwords=['a','about','above','after','again','against','ain','all','am','an','and','any','are','aren',"aren't",'as','at','be','because','been','before','being','below','between','both','but','by','can','couldn',"couldn't",'d','did','didn',"didn't",'do','does','doesn',"doesn't",'doing','don',"don't",'down','during','each','few','for','from','further','had','hadn',"hadn't",'has','hasn',"hasn't",'have','haven',"haven't",'having','he','her','here','hers','herself','him','himself','his','how','i','if','in','into','is','isn',"isn't",'it',"it's",'its','itself','just','ll','m','ma','me','mightn',"mightn't",'more','most','mustn',"mustn't",'my','myself','needn',"needn't",'no','nor','not','now','o','of','off','on','once','only','or','other','our','ours','ourselves','out','over','own','re','s','same','shan',"shan't",'she',"she's",'should',"should've",'shouldn',"shouldn't",'so','some','such','t','than','that',"that'll",'the','their','theirs','them','themselves','then','there','these','they','this','those','through','to','too','under','until','up','ve','very','was','wasn',"wasn't",'we','were','weren',"weren't",'what','when','where','which','while','who','whom','why','will','with','won',"won't",'wouldn',"wouldn't",'y','you',"you'd","you'll","you're","you've",'your','yours','yourself','yourselves']
    elif stopword_selection=="Specify stop words":
        word_stopwords=[]
        user_stopwords=st.text_area('Please enter or copy stop words here', value='', height=200, key = st.session_state['key'] )
        if len(user_stopwords)>0:
            # Tokenize the pasted text with a fresh CountVectorizer; its
            # vocabulary becomes the stop-word list.
            # NOTE(review): get_feature_names() is removed in sklearn >= 1.2
            # (use get_feature_names_out) — confirm the pinned version.
            stopwords_cv = CountVectorizer()
            stopwords_cv_fit=stopwords_cv.fit_transform([user_stopwords])
            word_stopwords=stopwords_cv.get_feature_names()
    st.write("")
    a4,a5=st.columns(2)
    with a4:
        # user specification of words to search
        # Iterating a DataFrame yields its column names, so set(word_list)
        # below is the vocabulary; stop words are removed via set difference.
        word_list=pd.DataFrame(columns=word_sorted.index)
        #words_cleaned=word_list.drop(word_stopwords,axis=1)
        words_cleaned=sorted(list(set(word_list)-set(word_stopwords)))
        find_words=st.multiselect("Search sentences with following words",
            words_cleaned, key = st.session_state['key'])
    with a5:
        #user-specification of n-grams (n = number of words per extracted gram)
        user_ngram=st.number_input("Specify the number of words to be extracted (n-grams)", min_value=1, value=2, key = st.session_state['key'])
    # Optional preview of the raw word-count table (stop words still included).
    if st.checkbox('Show a word count', value = False, key = st.session_state['key']):
        st.write(word_sorted)
    st.write("")
    number_remove=st.checkbox("Remove numbers from text", value=True, key = st.session_state['key'])
    a4,a5=st.columns(2)
    with a4:
        #WordCloud color specification
        st.write("")
        draw_WordCloud=st.checkbox("Create a Word Cloud", value=True, key = st.session_state['key'])
    with a5:
        if draw_WordCloud==True:
            #color options for the WordCloud (user selection);
            # values are HSL hue angles consumed by random_color_func via the
            # module-level user_color variable set below.
            color_options= pd.DataFrame(np.array([[21, 120, 12, 240, 30]]),
                columns=['orange', 'green', 'red','blue','brown'])
            user_color_name=st.selectbox('Select the main color of your WordCloud',color_options.columns, key = st.session_state['key'])
            user_color=color_options[user_color_name]
    st.write("")
    st.write("")
    run_text = st.button("Press to start text processing...")
    if run_text:
        st.write("")
        st.write("")
        st.info("Text processing progress")
        # Progress bar advanced in thirds after each of the three main stages.
        text_bar = st.progress(0.0)
        progress = 0
        #---------------------------------------------------------------------------------
        # Basic NLP metrics and visualisations
        #---------------------------------------------------------------------------------
        wfreq_output = st.expander("Basic NLP metrics and visualisations ", expanded = False)
        with wfreq_output:
            # Word frequency
            st.subheader('Word count')
            #calculate word frequency - stop words excluded (project helper
            # fc.cv_text; 1 = unigram counts):
            word_sorted=fc.cv_text(user_text, word_stopwords, 1,user_precision,number_remove)
            st.write("")
            st.write("Number of words: ", word_sorted["Word count"].sum())
            # Sentence = any run of non-period characters ending in a period.
            st.write("Number of sentences", len(re.findall(r"([^.]*\.)" ,user_text)))
            if len(word_stopwords)>0:
                st.warning("All analyses are based on text with stop words removed!")
            else:
                st.warning("No stop words are removed from the text!")
            st.write(word_sorted.style.format({"Rel. freq.": "{:.2f}"}))
            a4,a5=st.columns(2)
            with a4:
                # relative frequency for the top 10 words (or fewer, if the
                # vocabulary is smaller) as a horizontal bar chart
                txt_bar=word_sorted.head(min(len(word_sorted),10))
                fig = go.Figure()
                fig.add_trace(go.Bar(x=txt_bar["Rel. freq."], y=txt_bar.index, name='',marker_color = 'indianred',opacity=0.5,orientation='h'))
                fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
                fig.update_layout(xaxis=dict(title='relative fraction %', titlefont_size=14, tickfont_size=14,),)
                fig.update_layout(hoverlabel=dict(bgcolor="white",align="left"))
                fig.update_layout(height=400,width=400)
                st.plotly_chart(fig, use_container_width=True)
                st.info("Top " + str(min(len(word_sorted),10)) + " words relative frequency")
            with a5:
                # Distribution of word lengths, normalised to probabilities.
                fig = go.Figure(data=[go.Histogram(x=word_sorted["Word length"], histnorm='probability',marker_color ='steelblue',opacity=0.5)])
                fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
                fig.update_layout(xaxis=dict(title='word length', titlefont_size=14, tickfont_size=14,),)
                fig.update_layout(hoverlabel=dict(bgcolor="white",align="left"))
                fig.update_layout(height=400,width=400)
                st.plotly_chart(fig, use_container_width=True)
                st.info("Word length distribution")
            #word similarity vs. word length & word frequency
            # For each top word, sum its SequenceMatcher ratio (as a percent)
            # against every top word (itself included), then average below.
            word_similarity=[]
            for word in txt_bar.index:
                d=0
                for sword in txt_bar.index:
                    seq = SequenceMatcher(None,word,sword)
                    d = d+(seq.ratio()*100)
                word_similarity.append([d])
            # NOTE(review): np.float_ was removed in NumPy 2.0 (np.float64) —
            # confirm the pinned NumPy version.
            txt_bar["Similarity"]=(np.float_(word_similarity)/len(txt_bar.index)).round(user_precision)
            a4,a5=st.columns(2)
            with a4:
                # bubble chart: marker size encodes the average similarity
                fig = go.Figure(data=[go.Scatter(
                    y=txt_bar.index, x=txt_bar["Rel. freq."], mode='markers',text=txt_bar["Similarity"],
                    marker_size=txt_bar["Similarity"],marker_color='indianred') ])
                fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
                fig.update_layout(xaxis=dict(title='relative frequency', titlefont_size=14, tickfont_size=14,),)
                fig.update_layout(hoverlabel=dict(bgcolor="white",align="left"))
                fig.update_layout(height=400,width=400)
                st.plotly_chart(fig, use_container_width=True)
                st.info("Bubble size eq. average word similarity across the top " + str(min(len(word_sorted),10)) +" words")
            with a5:
                # NOTE(review): df_to_plot aliases word_sorted, so adding the
                # 'word' column mutates word_sorted in place — intended?
                df_to_plot=word_sorted
                df_to_plot['word']=word_sorted.index
                fig = px.scatter(data_frame=df_to_plot, x='Word length', y='Rel. freq.',hover_data=['word','Word length', 'Rel. freq.'], color_discrete_sequence=['steelblue'])
                fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
                fig.update_layout(xaxis=dict(title="word length", titlefont_size=14, tickfont_size=14,),)
                fig.update_layout(yaxis=dict(title="word frequency", titlefont_size=14, tickfont_size=14,),)
                fig.update_layout(hoverlabel=dict(bgcolor="white", ))
                fig.update_layout(height=400,width=400)
                st.plotly_chart(fig)
                st.info("A comparision of frequencies of short and long words")
# bigram distribution
cv2_output=fc.cv_text(user_text, word_stopwords, 2,user_precision,number_remove)
# trigram distribution
cv3_output=fc.cv_text(user_text, word_stopwords, 3,user_precision,number_remove)
a4,a5=st.columns(2)
with a4:
txt_bar=cv2_output.head(min(len(cv2_output),10))
fig = go.Figure()
fig.add_trace(go.Bar(x=txt_bar["Rel. freq."], y=txt_bar.index, name='',marker_color = 'indianred',opacity=0.5,orientation='h'))
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
fig.update_layout(xaxis=dict(title='relative fraction %', titlefont_size=14, tickfont_size=14,),)
fig.update_layout(hoverlabel=dict(bgcolor="white",align="left"))
fig.update_layout(height=400,width=400)
st.plotly_chart(fig, use_container_width=True)
st.info("Top " + str(min(len(cv2_output),10)) + " bigrams relative frequency")
with a5:
txt_bar=cv3_output.head(10)
fig = go.Figure()
fig.add_trace(go.Bar(x=txt_bar["Rel. freq."], y=txt_bar.index, name='',marker_color = 'indianred',opacity=0.5,orientation='h'))
fig.update_layout({'plot_bgcolor': 'rgba(0, 0, 0, 0)',})
fig.update_layout(xaxis=dict(title='relative fraction %', titlefont_size=14, tickfont_size=14,),)
fig.update_layout(hoverlabel=dict(bgcolor="white",align="left"))
fig.update_layout(height=400,width=400)
st.plotly_chart(fig, use_container_width=True)
st.info("Top " + str(min(len(cv2_output),10)) + " trigrams relative frequency")
            if draw_WordCloud==True:
                #Draw WordCloud with the hue chosen above (random_color_func
                # reads the module-level user_color selection).
                wordcloud = WordCloud(background_color="white",
                    contour_color="white",max_words=100,stopwords=word_stopwords,
                    width=600,height=400,color_func=random_color_func).generate(user_text)
                fig_text, ax = plt.subplots()
                ax=plt.imshow(wordcloud, interpolation='bilinear')
                plt.axis("off")
                st.subheader('WordCloud')
                st.pyplot(fig_text)
            # Stage 1 of 3 complete.
            progress += 1
            text_bar.progress(progress/3)
            # Download link: write all tables into one in-memory xlsx workbook
            # and expose it as a base64 data-URI anchor.
            st.write("")
            output = BytesIO()
            excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
            word_sorted.to_excel(excel_file, sheet_name="words",index=False)
            if len(word_stopwords)>0:
                pd.DataFrame(word_stopwords,columns=['stop words']).to_excel(excel_file, sheet_name="stop words",index=False)
            if len(cv2_output)>0:
                cv2_output.to_excel(excel_file, sheet_name="bigrams",index=True)
            if len(cv3_output)>0:
                cv3_output.to_excel(excel_file, sheet_name="trigrams",index=True)
            # NOTE(review): ExcelWriter.save() was deprecated/removed in newer
            # pandas (use close()) — confirm the pinned pandas version.
            excel_file.save()
            excel_file = output.getvalue()
            b64 = base64.b64encode(excel_file)
            dl_file_name = "BasicTextAnalysis.xlsx"
            st.markdown(
                f"""
            <a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download basic NLP metrics</a>
            """,
            unsafe_allow_html=True)
            st.write("")
            #---------------------------------------------------------------------------------
            # Sentences with specific words
            #---------------------------------------------------------------------------------
            if len(find_words)>0:
                # extract all sentences containing ALL of the selected words:
                sentences_list=[]
                sentences = re.findall(r"([^.]*\.)" ,user_text)
                for sentence in sentences:
                    if all(word in sentence for word in find_words):
                        if len(sentence)<1000: # threshold for too-long sentences is 1000 characters
                            sentences_list.append(sentence)
                if len(sentences_list)>0:
                    sentences_output = st.expander("Sentences with specific words", expanded = False)
                    with sentences_output:
                        for sentence in sentences_list:
                            st.write(sentence)
                        #st.table(pd.DataFrame({'Sentences':sentences_list}))
                        # Download link (single-sheet xlsx via base64 data URI)
                        st.write("")
                        output = BytesIO()
                        excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
                        pd.DataFrame({'Sentences':sentences_list}).to_excel(excel_file, sheet_name="Sentences",index=False)
                        excel_file.save()
                        excel_file = output.getvalue()
                        b64 = base64.b64encode(excel_file)
                        dl_file_name = "Sentences with specific words.xlsx"
                        st.markdown(
                            f"""
                        <a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download sentences</a>
                        """,
                        unsafe_allow_html=True)
                        st.write("")
            # Stage 2 of 3 complete.
            progress += 1
            text_bar.progress(progress/3)
#---------------------------------------------------------------------------------
# User specific n-grams
#---------------------------------------------------------------------------------
#extract n-grams:
ngram_list=[]
text_cv = fc.cv_text(user_text, word_stopwords,user_ngram,user_precision,number_remove)
#CountVectorizer(analyzer='word', stop_words=set(word_stopwords), ngram_range=(user_ngram, user_ngram))
#text_cv_fit=text_cv.fit_transform([user_text])
#listToString='. '.join(text_cv.get_feature_names())
listToString='. '.join(text_cv.index)
sentences = re.findall(r"([^.]*\.)" ,listToString)
for sentence in sentences:
if all(word in sentence for word in find_words):
sentence=re.sub('[.]', '', sentence)
ngram_list.append(sentence)
if len(ngram_list)>0:
ngram_output = st.expander("n-grams", expanded = False)
with ngram_output:
st.write("")
st.subheader("n-grams")
st.write("")
for sentence in ngram_list:
st.write(sentence)
# Download link
st.write("")
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
pd.DataFrame({'n-gram':ngram_list}).to_excel(excel_file, sheet_name=str(user_ngram) +"-gram",index=False)
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = str(user_ngram)+"gram.xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download n-grams</a>
""",
unsafe_allow_html=True)
st.write("")
progress += 1
text_bar.progress(progress/3)
# Success message
#st.success('Text processing completed')
#----------------------------------------------------
# Stock data analysis - Yahoo Finance
# ---------------------------------------------------
if tw_classifier=='Financial analysis':
st.write('Check stock prices and key performance indicators for companies included in [S&P 500] (https://en.wikipedia.org/wiki/List_of_S%26P_500_companies), [DAX] (https://en.wikipedia.org/wiki/DAX), [FTSE 100] (https://en.wikipedia.org/wiki/FTSE_100_Index), [CSI 300](https://en.wikipedia.org/wiki/CSI_300_Index), [Nikkei 225] (https://de.wikipedia.org/wiki/Nikkei_225), [CAC 40] (https://en.wikipedia.org/wiki/CAC_40), [BSE SENSEX] (https://en.wikipedia.org/wiki/BSE_SENSEX) and [KOSPI](https://en.wikipedia.org/wiki/KOSPI) indexes, or for any company available via [Yahoo Finance](https://finance.yahoo.com/). Note, the bulk download and data processing may take some time!')
a1,a2,a3=st.columns([1,3,1])
with a1:
stock_search_option=st.radio('',['Indexes', 'Symbol'])
with a2:
st.markdown("")
st.markdown("")
st.write("Start your analysis by either selecting companies from specific key indexes, or by entering a ticker symbol")
# delete session state if input widget change
            def in_wid_change():
                # Callback for input widgets: clear the remembered "Get Financial
                # Figures" click so changed inputs force a fresh data load.
                st.session_state['load_data_button'] = None
if stock_search_option !='Symbol':
# select ticker for KPI-Dashboard
co1 = st.container()
st.write('Consider stocks from the following indices:')
c1, c2, c3,c4 = st.columns(4)
ticker_options = []
SP500 = c1.checkbox('S&P 500', True, on_change=in_wid_change)
CSI300 = c1.checkbox('CSI 300', False, on_change=in_wid_change)
DAX = c2.checkbox('DAX', True, on_change=in_wid_change)
NIKKEI225 = c2.checkbox('NIKKEI 225', False, on_change=in_wid_change)
CAC40=c3.checkbox('CAC40', False, on_change=in_wid_change)
BSE_SENSEX=c3.checkbox('BSE SENSEX', False, on_change=in_wid_change)
FTSE = c4.checkbox('FTSE', False, on_change=in_wid_change)
KOSPI = c4.checkbox('KOSPI', False, on_change=in_wid_change)
else: #selected option is 'Symbol'
selected_stock = st.text_input("Enter at least one stock ticker symbol! Please use space for ticker symbol separation!", "TSLA")
selected_stock=(list(selected_stock.split(" ")))
selected_stock = list(filter(None, selected_stock))
list_companies=[]
list_symbols = []
list_sectors = []
list_stockindex = []
for i in range(len(selected_stock)):
sel_stock_string=selected_stock[i]
try:
yf.Ticker(sel_stock_string).info['longName']
except:
st.error('Cannot get any data on ' + sel_stock_string + ', so the ticker probably does not exist! \n Please check spelling, or try another ticker symbol!')
return
list_companies.append(yf.Ticker(selected_stock[i]).info['longName'])
list_sectors.append('XX')
list_stockindex.append('XX')
list_symbols = selected_stock
if len(selected_stock)==0:
selected_stock=[]
#--------------------------------
# Get lists of company's/tickers
# FUNCTIONS
# download list of companies
            @st.cache()
            def load_ticker():
                """Scrape the constituents of every checked stock index and merge them.

                Reads the index checkbox flags (SP500, DAX, FTSE, CSI300, NIKKEI225,
                CAC40, BSE_SENSEX, KOSPI) and the helper module `fc` from the
                enclosing scope; result is memoized by st.cache.

                Returns:
                    df_indicesdata: DataFrame with columns
                        'Ticker', 'Company', 'Sector', 'Stock index'
                    followed by the eight per-index symbol lists (empty for
                    unchecked indexes).
                """
                #---------------------------------------------------------------------------------------------
                # One (symbols, company, sector, index) quadruple per stock index,
                # initialized empty so unchecked indexes contribute nothing below.
                symbols_SP, symbols_DAX, symbols_FTSE, symbols_BSE, symbols_CAC, symbols_CSI, symbols_KO, symbols_NIK=[],[],[],[],[],[],[],[]
                company_SP , company_DAX , company_FTSE, company_BSE,company_CAC,company_CSI,company_KO,company_NIK=[],[],[],[],[],[],[],[]
                sector_SP , sector_DAX , sector_FTSE, sector_BSE,sector_CAC,sector_CSI,sector_KO,sector_NIK=[],[],[],[],[],[],[],[]
                index_SP , index_DAX , index_FTSE, index_BSE,index_CAC,index_CSI,index_KO,index_NIK=[],[],[],[],[],[],[],[]
                # NOTE(review): the trailing integers passed to fc.get_stock_list
                # presumably are per-site HTML table/column positions — verify in fc.
                #S&P500:
                if SP500:
                    (symbols_SP,company_SP,sector_SP,index_SP)= fc.get_stock_list('S&P500','https://en.wikipedia.org/wiki/List_of_S%26P_500_companies',0, 0, 1, 3)
                #DAX
                if DAX:
                    (symbols_DAX,company_DAX,sector_DAX,index_DAX)=fc.get_stock_list('DAX','https://en.wikipedia.org/wiki/DAX',3, 3, 1, 2)
                #FTSE
                if FTSE:
                    (symbols_FTSE,company_FTSE,sector_FTSE,index_FTSE)=fc.get_stock_list('FTSE100','https://en.wikipedia.org/wiki/FTSE_100_Index',3, 1, 0, 2)
                #CSI300
                if CSI300:
                    (symbols_CSI,company_CSI,sector_CSI,index_CSI)=fc.get_stock_list('CSI300','https://en.wikipedia.org/wiki/CSI_300_Index',3, 0, 1, 4)
                #NIKKEI225
                if NIKKEI225:
                    (symbols_NIK,company_NIK,sector_NIK,index_NIK)=fc.get_stock_list('NIKKEI225','https://topforeignstocks.com/indices/the-components-of-the-nikkei-225-index/',0, 2, 1, 3)
                #CAC40
                if CAC40:
                    (symbols_CAC,company_CAC,sector_CAC,index_CAC)=fc.get_stock_list('CAC40','https://en.wikipedia.org/wiki/CAC_40',3, 3, 0, 1)
                #BSE_SENSEX
                if BSE_SENSEX:
                    (symbols_BSE,company_BSE,sector_BSE,index_BSE)=fc.get_stock_list('BSE_SENSEX','https://en.wikipedia.org/wiki/BSE_SENSEX',1, 2, 3, 4)
                #S&P_TSX60
                #(symbols_TS,company_TS,sector_TS,index_TS)=fc.get_stock_list('S&P_TSX60','https://topforeignstocks.com/indices/the-components-of-the-sptsx-composite-index/')
                #KOSPI
                if KOSPI:
                    (symbols_KO,company_KO,sector_KO,index_KO)=fc.get_stock_list('KOSPI','https://topforeignstocks.com/indices/the-components-of-the-korea-stock-exchange-kospi-index/',0, 2, 1, 3)
                #---------------------------------------------------------------------------------------------
                # merge into one dataframe
                list_symbols = symbols_SP + symbols_DAX + symbols_FTSE+ symbols_BSE+symbols_CAC+symbols_CSI+symbols_KO+symbols_NIK
                list_companies = company_SP + company_DAX + company_FTSE+ company_BSE+company_CAC+company_CSI+company_KO+company_NIK
                list_sectors = sector_SP + sector_DAX + sector_FTSE+ sector_BSE+sector_CAC+sector_CSI+sector_KO+sector_NIK
                list_stockindex = index_SP + index_DAX + index_FTSE+ index_BSE+index_CAC+index_CSI+index_KO+index_NIK
                df_indicesdata = pd.DataFrame({'Ticker': list_symbols, 'Company': list_companies, 'Sector': list_sectors, 'Stock index': list_stockindex})
                return df_indicesdata, symbols_SP, symbols_DAX, symbols_FTSE, symbols_BSE, symbols_CAC, symbols_CSI, symbols_KO, symbols_NIK
# define ticker object
@st.cache(allow_output_mutation=True)
def function_ticker():
ticker = []
for i in range(len(list_symbols)):
ticker.append(Class_ticker())
return ticker
# function for multiselect ticker vs. company
            def ticker_dict_func(option):
                # format_func for multiselect widgets: show the company name
                # instead of the raw ticker symbol (lookup in closure ticker_dict).
                return ticker_dict[option]
# function for multiselect index vs. company
            def index_dict_func(option):
                # format_func for multiselect widgets keyed by position in the
                # closure `ticker` list: display the company name of that entry.
                return ticker[option].company
# create df_selected
@st.cache(allow_output_mutation=True)
def function_df_selected():
df_selected = pd.DataFrame(columns=['Ticker', 'Company', 'Sector'])
index_list_rename = []
for i in range(len(selected_stock)):
df_selected = df_selected.append(df_indicesdata[df_indicesdata['Ticker']==selected_stock[i]])
index_list_rename += [i+1]
index_list = df_selected.index.tolist()
df_selected.index = index_list_rename
return df_selected, index_list
def data_available(label, source, selected_year):
if label in source.index and source.at[label, selected_year] is not None and source.at[label, selected_year] != 0 :
return True
else:
return False
# calculate average
def average_func(index_list, df_selected, df):
if len(index_list) > 1:
if len(df_selected['Stock index'])*2 == (len(df_selected[df_selected['Stock index']==df_selected.at[1, 'Stock index']])+len(df_selected[df_selected['Sector']==df_selected.at[1, 'Sector']])):
means = df.mean(axis=1)
df.insert(0, 'average' , means)
return df
# create df
def fill_df_func(df, ticker, i, label_index, label_column):
if df.empty:
df = pd.DataFrame(ticker[i].kpis, index=label_index, columns=[label_column])
else:
df.insert(len(df.columns) , label_column, ticker[i].kpis)
return df
#----------------------------------------------------------------------------------------------
# define class of tickers
class Class_ticker:
# master data
def __init__(self):
self.symbol = None
self.company = None
self.sector = None
self.dict = None
def master_data(self, i):
self.symbol = list_symbols[i]
self.company = list_companys[i]
self.sector = list_sectors[i]
self.stockindex = list_stockindex[i]
# data loading
def load_data(self):
self.data = yf.Ticker(self.symbol)
#----------------------------------------------------------------------------------------------
# calculate kpis
#profitability
def kpi_profitability(self, selected_year):
if data_available('Ebit', self.fi, selected_year) and data_available('Total Assets', self.bs, selected_year):
self.roi = self.fi.at['Ebit', selected_year] / self.bs.at['Total Assets', selected_year] * 100
else:
self.roi = np.nan
if data_available('Ebit', self.fi, selected_year) and data_available('Total Stockholder Equity', self.bs, selected_year):
self.roe = self.fi.at['Ebit', selected_year] / self.bs.at['Total Stockholder Equity', selected_year] * 100
else:
self.roe = np.nan
if data_available('Total Revenue', self.fi, selected_year):
self.revenues = self.fi.at['Total Revenue', selected_year] / 1000000000
else:
self.revenues = np.nan
if data_available('Ebit', self.fi, selected_year) and data_available('Depreciation', self.cf, selected_year) and data_available('Total Revenue', self.fi, selected_year):
self.ebitda_margin = (self.fi.at['Ebit', selected_year] + self.cf.at['Depreciation', selected_year]) / self.fi.at['Total Revenue', selected_year] *100
else:
self.ebitda_margin = np.nan
if data_available('Ebit', self.fi, selected_year) and data_available('Total Revenue', self.fi, selected_year):
self.ebit_margin = self.fi.at['Ebit', selected_year] / self.fi.at['Total Revenue', selected_year] * 100
else:
self.ebit_margin = np.nan
self.kpis = [self.roi, self.roe, self.revenues, self.ebitda_margin, self.ebit_margin]
#debt capital
def kpi_debt_capital(self, selected_year):
if data_available('Total Liab', self.bs, selected_year) and data_available('Cash', self.bs, selected_year) and data_available('Ebit', self.fi, selected_year) and data_available('Depreciation', self.cf, selected_year):
self.netdebt_ebitda = (self.bs.at['Total Liab', selected_year] - self.bs.at['Cash', selected_year]) / (self.fi.at['Ebit', selected_year] + self.cf.at['Depreciation', selected_year])
else:
self.netdebt_ebitda = np.nan
if data_available('Ebit', self.fi, selected_year) and data_available('Depreciation', self.cf, selected_year) and data_available('Interest Expense', self.fi, selected_year):
self.ebita_interest = (self.fi.at['Ebit', selected_year] + self.cf.at['Depreciation', selected_year]) / self.fi.at['Interest Expense', selected_year] * -1
else:
self.ebita_interest = np.nan
if data_available('Total Current Liabilities', self.bs, selected_year) and data_available('Total Current Assets', self.bs, selected_year):
self.current_ratio = self.bs.at['Total Current Liabilities', selected_year] / self.bs.at['Total Current Assets', selected_year]
else:
self.current_ratio = np.nan
if data_available('Accounts Payable', self.bs, selected_year) and data_available('Cost Of Revenue', self.fi, selected_year) :
self.dpo = self.bs.at['Accounts Payable', selected_year] * 365 / self.fi.at['Cost Of Revenue', selected_year]
else:
self.dpo = np.nan
self.kpis = [self.netdebt_ebitda, self.ebita_interest, self.current_ratio, self.dpo]
#equity capital
def kpi_equity_capital(self):
if 'revenuePerShare' in self.info:
self.revenuepershare = self.info['revenuePerShare']
else:
self.revenuepershare = np.nan
if 'forwardEps' in self.info:
self.eps = self.info['forwardEps']
else:
self.eps = np.nan
if 'dividendRate' in self.info:
self.dividendrate = self.info['dividendRate']
else:
self.dividendrate = np.nan
self.kpis = [self.revenuepershare, self.eps, self.dividendrate]
#valuation
def kpi_valuation(self):
if 'forwardPE' in self.info:
self.forwardPE = self.info['forwardPE']
else:
self.forwardPE = np.nan
if 'pegRatio' in self.info:
self.pegratio = self.info['pegRatio']
else:
self.pegratio = np.nan
if 'priceToBook' in self.info:
self.pricetobook = self.info['priceToBook']
else:
self.pricetobook = np.nan
if 'enterpriseValue' in self.info:
self.ev = self.info['enterpriseValue'] / 1000000000
else:
self.ev = np.nan
if 'enterpriseToRevenue' in self.info:
self.evtorevenue = self.info['enterpriseToRevenue']
else:
self.evtorevenue = np.nan
if 'enterpriseToEbitda' in self.info:
self.evtoebitda = self.info['enterpriseToEbitda']
else:
self.evtoebitda = np.nan
self.kpis = [self.forwardPE, self.pegratio, self.pricetobook, self.ev, self.evtorevenue, self.evtoebitda]
def stock_history(self):
self.history = self.data.history(period=str(stock_period))
#capital procurement
def kpi_capital_procurement(self, selected_year):
if data_available('Total Cash From Operating Activities', self.cf, selected_year) and data_available('Total Cashflows From Investing Activities', self.cf, selected_year):
self.selffinancingratio = self.cf.at['Total Cash From Operating Activities', selected_year]/ self.cf.at['Total Cashflows From Investing Activities', selected_year] * -1
else:
self.selffinancingratio = np.nan
if data_available('Total Stockholder Equity', self.bs, selected_year) and data_available('Total Assets', self.bs, selected_year):
self.equityratio = self.bs.at['Total Stockholder Equity', selected_year] / self.bs.at['Total Assets', selected_year] * 100
else:
self.equityratio = np.nan
self.kpis = [self.selffinancingratio, self.equityratio]
#capital allocation
def kpi_capital_allocation(self, selected_year):
if data_available('Capital Expenditures', self.cf, selected_year):
self.capexrevenueratio = self.cf.at['Capital Expenditures', selected_year] * -1 / self.fi.at['Total Revenue', selected_year]
else:
self.capexrevenueratio = np.nan
if data_available('Research Development', self.fi, selected_year):
self.RDrevenueratio = self.fi.at['Research Development', selected_year] / self.fi.at['Total Revenue', selected_year]
else:
self.RDrevenueratio = np.nan
if data_available('Inventory', self.bs, selected_year) and data_available('Net Receivables', self.bs, selected_year):
self.ccc = (self.bs.at['Inventory', selected_year] * 365 / self.fi.at['Total Revenue', selected_year]) + (self.bs.at['Net Receivables', selected_year] * 365 / self.fi.at['Total Revenue', selected_year]) - self.dpo
else:
self.ccc = np.nan
self.kpis = [self.capexrevenueratio, self.RDrevenueratio, self.ccc]
#procurement market
def kpi_procurement_market(self, selected_year):
if selected_year == self.fi.columns[0] and 'fullTimeEmployees' in self.info and data_available('Total Revenue', self.fi, selected_year):
self.labour_productivity = self.fi.at['Total Revenue', self.fi.columns[0]] / 1000 / self.info['fullTimeEmployees']
else:
self.labour_productivity = np.nan
if data_available('Total Revenue', self.fi, selected_year) and data_available('Net Receivables', self.bs, selected_year):
self.asset_turnover = self.fi.at['Total Revenue', selected_year] / self.bs.at['Net Receivables', selected_year] * 100
else:
self.asset_turnover = np.nan
self.kpis = [self.labour_productivity, self.asset_turnover]
#----------------------------------------------------------------------------------------------
# load ticker
if stock_search_option=='Symbol':
df_indicesdata = pd.DataFrame({'Ticker': list_symbols, 'Company': list_companies, 'Sector': list_sectors, 'Stock index': list_stockindex})
symbols_SP, symbols_DAX, symbols_FTSE, symbols_BSE, symbols_CAC, symbols_CSI, symbols_KO, symbols_NIK=[],[],[],[],[],[] ,[] ,[]
else:
df_indicesdata, symbols_SP, symbols_DAX, symbols_FTSE, symbols_BSE, symbols_CAC, symbols_CSI, symbols_KO, symbols_NIK = load_ticker()
list_symbols = df_indicesdata['Ticker'].values.tolist()
list_companys = df_indicesdata['Company'].values.tolist()
list_sectors = df_indicesdata['Sector'].values.tolist()
list_stockindex = df_indicesdata['Stock index'].values.tolist()
# ini_msg=st.info("Initializing stock data loading...")
#my_bar = st.progress(0.0)
# progress_sum=len(list_symbols)
#progress = 0
# define ticker object and load data
ticker = function_ticker()
# create dictonary, master data, load data
ticker_dict = {}
for i in range(len(list_symbols)):
ticker[i].master_data(i)
ticker_dict[ticker[i].symbol] = ticker[i].company
ticker[i].load_data()
#progress += 1
#my_bar.progress(progress/progress_sum)
#suc_msg=st.success('Stock data loading... done!')
# time.sleep(1)
# my_bar.empty()
# suc_msg.empty()
#ini_msg.empty()
# check session state
if 'selected_stock' not in st.session_state:
st.session_state['selected_stock'] = None
if 'load_data_button' not in st.session_state:
st.session_state['load_data_button'] = None
if 'list_years' not in st.session_state:
st.session_state['list_years'] = None
#----------------------------------------------------------------------------------------------
if stock_search_option !='Symbol':
co1 = st.container()
if SP500:
ticker_options += symbols_SP
if DAX:
ticker_options +=symbols_DAX
if FTSE:
ticker_options += symbols_FTSE
if CSI300:
ticker_options += symbols_CSI
if NIKKEI225:
ticker_options += symbols_NIK
if BSE_SENSEX:
ticker_options += symbols_BSE
if CAC40:
ticker_options += symbols_CAC
if KOSPI:
ticker_options += symbols_KO
if SP500:
default = 'MSFT'
elif DAX:
default = 'VOW3.DE'
elif FTSE:
default = 'VOD.L'
elif CSI300:
default = '601318.SS'
elif NIKKEI225:
default = '9983.T'
elif BSE_SENSEX:
default = 'HDFCBANK.BO'
elif CAC40:
default = 'BNP.PA'
elif KOSPI:
default = '005930.KS'
else:
st.error('Please select at least one stock index.')
st.stop()
st.markdown("")
selected_stock = st.multiselect('Select at least one company', ticker_options, default , format_func=ticker_dict_func, on_change=in_wid_change)
else:
list_symbols = selected_stock
#list_companies = ticker[0].data.info['longName']
#list_sectors = 'XX'
#list_stockindex = 'XX'
df_indicesdata = pd.DataFrame({'Ticker': list_symbols, 'Company': list_companies, 'Sector': list_sectors, 'Stock index': list_stockindex})
#st.write(df_indicesdata)
# output specification
tb_options=['Basic info','Institutional Holders','Stock Price', 'Balance Sheet', 'Cashflow','Other Financials']
tkpi_options= ['Profitability', 'Debt Capital', 'Equity Capital','Valuation', 'Capital Procurement','Capital Allocation','Procurement Market']
tb_output=st.multiselect('Select ticker basic information',tb_options,['Stock Price', 'Balance Sheet','Cashflow','Other Financials'])
tkpi_output=st.multiselect('Select performance indicators',tkpi_options,['Profitability', 'Debt Capital', 'Valuation'])
if len(selected_stock) == 0:
st.error('Please select at least one ticker.')
st.session_state['selected_stock'] = None
st.stop()
# ticker from the same sector for 1 selection only
if len(selected_stock) == 1:
sector_list = []
sector_ticker = []
sector_list = df_indicesdata[df_indicesdata['Ticker']==selected_stock[0]].index.tolist()
for i in range(len(ticker)):
if ticker[i].sector == ticker[sector_list[0]].sector and ticker[i].stockindex == ticker[sector_list[0]].stockindex and ticker[i].company != ticker[sector_list[0]].company:
sector_list += [i]
sector_ticker += [ticker[i].symbol]
# c1 = st.container()
# c2 = st.container()
# select_all = c2.checkbox('Select all companies of '+ ticker[sector_list[0]].sector + ' in ' + ticker[sector_list[0]].stockindex + ' (can take some time)', on_change=in_wid_change)
# if select_all:
# sector_comparison = c1.multiselect('Select a Ticker from the same sector and stock index for comparison', sector_ticker, sector_ticker, format_func=ticker_dict_func, on_change=in_wid_change)
# else:
# sector_comparison = c1.multiselect('Select a Ticker from the same sector and stock index for comparison', sector_ticker, format_func=ticker_dict_func, on_change=in_wid_change)
# selected_stock += sector_comparison
#else: select_all = None
# create df_selected
df_selected, index_list = function_df_selected()
#st.write('**Your selection:**')
#st.write(df_selected)
# company information
# if len(index_list) < 2:
# for i in index_list:
# if st.checkbox('Show company information for ' + ticker[i].company):
# st.write(ticker[i].data.info['longBusinessSummary'])
# else:
# company_info = st.multiselect('Show company information for ', index_list, format_func=index_dict_func)
# for i in company_info:
# st.markdown('Company information for ' + ticker[i].company)
# st.write(ticker[i].data.info['longBusinessSummary'])
#----------------------------------------------------------------------------------------------
######## DATA LOADING #########
st.markdown("")
b1, b5 = st.columns([5,1])
load_data_button = b1.button('Get Financial Figures')
if b5.button('Clear Cache', on_click=in_wid_change):
st.legacy_caching.clear_cache()
if load_data_button:
st.session_state['load_data_button'] = load_data_button
if st.session_state['load_data_button']:
c3 = st.container()
my_bar = st.progress(0.0)
progress_sum = len(index_list) + len(tb_options)+len(tkpi_options)
progress = 0
if selected_stock != st.session_state['selected_stock'] or load_data_button:
st.session_state['selected_stock'] = selected_stock
list_years = []
for i in index_list:
ticker[i].bs = ticker[i].data.balance_sheet
if ticker[i].bs.empty:
index_list.remove(i)
st.warning('No data found for ' + ticker[i].company)
else:
ticker[i].bs.columns = pd.DatetimeIndex(ticker[i].bs.columns).year
for element in ticker[i].bs.columns:
if element not in list_years:
list_years.append(element)
ticker[i].cf = ticker[i].data.cashflow
ticker[i].cf.columns = pd.DatetimeIndex(ticker[i].cf.columns).year
ticker[i].fi = ticker[i].data.financials
ticker[i].fi.columns = pd.DatetimeIndex(ticker[i].fi.columns).year
ticker[i].info = ticker[i].data.info
# check and adjust duplicate years
list_columns = ticker[i].bs.columns.values.tolist()
for n in range(len(ticker[i].bs.columns)):
if any(ticker[i].bs.columns.duplicated()):
list_duplicate_bool = ticker[i].bs.columns.duplicated()
index_duplicate = [i for i, x in enumerate(list_duplicate_bool) if x]
list_columns[index_duplicate[0]] = list_columns[index_duplicate[0]]-1
ticker[i].bs.columns = list_columns
ticker[i].cf.columns = list_columns
ticker[i].fi.columns = list_columns
progress += 1
my_bar.progress(progress/progress_sum)
# sort list of years
list_years.sort(reverse=True)
st.session_state['list_years'] = list_years
else:
list_years = st.session_state['list_years']
progress += len(index_list)
my_bar.progress(progress/progress_sum)
#----------------------------------------------------------------------------------------------
st.subheader('Ticker basics')
#----------------------------------------------------------------------------------------------
# basic info
if 'Basic info' in tb_output:
with st.expander('Company Info'):
if len(index_list) == 1:
st.markdown('Company information for ' + ticker[0].company)
st.write(ticker[0].data.info['longBusinessSummary'])
else:
for i in index_list:
st.markdown("")
st.markdown('Company information for ' + ticker[i].company)
st.write(ticker[0].data.info['longBusinessSummary'])
#progress
progress += 1
my_bar.progress(progress/progress_sum)
# basic info
if 'Institutional Holders' in tb_output:
with st.expander('Institutional Holders'):
if len(index_list) == 1:
st.markdown('Institutional holders for ' + ticker[0].company +":")
st.write(ticker[0].data.institutional_holders)
else:
for i in index_list:
st.markdown("")
st.markdown('Institutional holders for ' + ticker[i].company +":")
st.write(ticker[i].data.institutional_holders)
#progress
progress += 1
my_bar.progress(progress/progress_sum)
#Stock price
if 'Stock Price' in tb_output:
with st.expander('Stock Price'):
st.write('**Stock Price Development**')
# Selection of parameter
#if len(index_list) == 1:
# stock_para = st.multiselect('Select parameter for stock price visualization', ['Open', 'High', 'Low', 'Close', 'Volume'], 'Open')
#elif len(index_list) > 1:
stock_para = st.selectbox('Select stock price info', ['Open', 'High', 'Low', 'Close', 'Volume'])
stock_period = st.selectbox('Select time period', ['start/end day','max','10y','5y','2y','1y','ytd','6mo','3mo','1mo','5d', '1d'], 2)
if stock_period=='start/end day':
today = datetime.date.today()
last_year = today - datetime.timedelta(days=365)
a1,a2=st.columns(2)
with a1:
start_date = st.date_input('Select start date', last_year, key=2)
with a2:
end_date = st.date_input('Select end date', today,key=3)
if start_date > end_date:
st.error('ERROR: End date must fall after start date.')
if len(index_list) < 6:
df_history = pd.DataFrame()
selected_company = []
for i in index_list:
if stock_period=='start/end day':
ticker[i].history = ticker[i].data.history(period='1d', start=start_date, end=end_date)
else:
ticker[i].history = ticker[i].data.history(period=str(stock_period))
if df_history.empty:
df_history = pd.DataFrame(ticker[i].history[stock_para])
selected_company += [ticker[i].company]
else:
df_history.insert(len(df_history.columns) , ticker[i].company, ticker[i].history[stock_para])
selected_company += [ticker[i].company]
df_history.columns = selected_company
else:
df_history = pd.DataFrame()
selected_company = []
for i in st.multiselect('Select company for visualization', index_list, index_list[0], format_func=index_dict_func):
if stock_period=='start & end day':
ticker[i].history = ticker[i].data.history(period='1d',start=start_date,end=end_date)
else:
ticker[i].history = ticker[i].data.history(period=str(stock_period))
if df_history.empty:
df_history = pd.DataFrame(ticker[i].history[stock_para])
selected_company += [ticker[i].company]
else:
df_history.insert(len(df_history.columns) , ticker[i].company, ticker[i].history[stock_para])
selected_company += [ticker[i].company]
df_history.columns = selected_company
st.line_chart(df_history)
#progress
progress += 1
my_bar.progress(progress/progress_sum)
# Balance Sheet
if 'Balance Sheet' in tb_output:
with st.expander('Balance Sheet'):
df_bs = pd.DataFrame()
no_rows=len(ticker[index_list[0]].data.balance_sheet.index)
label_index=ticker[index_list[0]].data.balance_sheet.index
if len(index_list) == 1:
for y in ticker[index_list[0]].data.balance_sheet.columns:
df_bs=ticker[index_list[0]].data.balance_sheet
if len(index_list) > 1:
selected_year = st.selectbox('select year', list_years, key=4)
year_id=max(list_years)-selected_year
for i in index_list:
if selected_year in pd.DatetimeIndex(ticker[i].data.balance_sheet.columns).year:
year_id=max(list_years)-selected_year
df_col=ticker[i].data.balance_sheet.iloc[:,[year_id]]
df_col.columns=[ticker[i].company]
else:
df_col =np.empty(no_rows)
df_col[:] = np.NaN
df_col=pd.DataFrame(df_col,index=label_index,columns=[ticker[i].company])
if df_bs.empty:
df_bs = df_col
else:
df_bs[ticker[i].company]=df_col
st.dataframe(df_bs.style.format("{:.2f}"))
#progress
progress += 1
my_bar.progress(progress/progress_sum)
# Cashflow
if 'Cashflow' in tb_output:
with st.expander('Cashflow'):
df_cf = pd.DataFrame()
no_rows=len(ticker[index_list[0]].data.cashflow.index)
label_index=ticker[index_list[0]].data.cashflow.index
if len(index_list) == 1:
for y in ticker[index_list[0]].cf.columns:
df_cf=ticker[index_list[0]].data.cashflow
if len(index_list) > 1:
selected_year = st.selectbox('select year', list_years, key=5)
year_id=max(list_years)-selected_year
for i in index_list:
if selected_year in ticker[i].cf.columns:
year_id=max(list_years)-selected_year
df_col=ticker[i].data.cashflow.iloc[:,[year_id]]
df_col.columns=[ticker[i].company]
else:
df_col =np.empty(no_rows)
df_col[:] = np.NaN
df_col=pd.DataFrame(df_col,index=label_index,columns=[ticker[i].company])
if df_cf.empty:
df_cf = df_col
else:
df_cf[ticker[i].company]=df_col
st.dataframe(df_cf.style.format("{:.2f}"))
#progress
progress += 1
my_bar.progress(progress/progress_sum)
# Other financials
if 'Other Financials' in tb_output:
with st.expander('Other Financials'):
df_of = pd.DataFrame()
no_rows=len(ticker[index_list[0]].data.financials.index)
label_index=ticker[index_list[0]].data.financials.index
if len(index_list) == 1:
for y in pd.DatetimeIndex(ticker[0].data.financials.columns).year:
df_of=ticker[index_list[0]].data.financials
if len(index_list) > 1:
selected_year = st.selectbox('select year', list_years, key=6)
year_id=max(list_years)-selected_year
for i in index_list:
if selected_year in pd.DatetimeIndex(ticker[i].data.financials.columns).year:
year_id=max(list_years)-selected_year
df_col=ticker[i].data.financials.iloc[:,[year_id]]
df_col.columns=[ticker[i].company]
else:
df_col =np.empty(no_rows)
df_col[:] = np.NaN
df_col=pd.DataFrame(df_col,index=label_index,columns=[ticker[i].company])
if df_of.empty:
df_of = df_col
else:
df_of[ticker[i].company]=df_col
st.dataframe(df_of)
#progress
progress += 1
my_bar.progress(progress/progress_sum)
if len(tb_output)>0:
#download excel file
st.markdown("")
output = BytesIO()
excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
if 'Balance Sheet' in tb_output:
df_bs.to_excel(excel_file, sheet_name='Balance sheet')
if 'Stock Price' in tb_output:
df_history.to_excel(excel_file, sheet_name='Stock Price')
if 'Cashflow' in tb_output:
df_cf.to_excel(excel_file, sheet_name='Cashflow')
if 'Other Financials' in tb_output:
df_of.to_excel(excel_file, sheet_name='Other Financials')
excel_file.save()
excel_file = output.getvalue()
b64 = base64.b64encode(excel_file)
dl_file_name = "Stock Basic Info.xlsx"
st.markdown(
f"""
<a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download Stock Basic Info</a>
""",
unsafe_allow_html=True)
#------------------------------------------------------------------------
st.subheader('KPI-Dashboard')
# profitability
# NOTE(review): indentation in this section is reconstructed from context.
if 'Profitability' in tkpi_output:
    with st.expander('Profitability'):
        label_index = 'Return on Investment (ROI) [in %]', 'Return on Equity (ROE) [in %]', 'Total Revenue [in billion]', 'EBITDA-Margin [in %]', 'EBIT-Margin [in %]'
        df_profitability = pd.DataFrame()
        # single company selected: one column per reported fiscal year
        if len(index_list) == 1:
            for y in ticker[index_list[0]].bs.columns:
                ticker[index_list[0]].kpi_profitability(y)
                df_profitability = fill_df_func(df_profitability, ticker, index_list[0], label_index, y)
        # several companies: one column per company for a user-chosen year
        if len(index_list) > 1:
            selected_year = st.selectbox('select year', list_years, key=7)
            for i in index_list:
                if selected_year in ticker[i].bs.columns:
                    ticker[i].kpi_profitability(selected_year)
                else:
                    # no report for that year -> NaN-filled KPI vector
                    ticker[i].kpis = np.empty(len(label_index))
                    ticker[i].kpis.fill(np.nan)
                df_profitability = fill_df_func(df_profitability, ticker, i, label_index, ticker[i].company)
            df_profitability = average_func(index_list, df_selected, df_profitability)
        st.dataframe(df_profitability.style.format("{:.2f}"))
        #progress
        progress += 1
        my_bar.progress(progress/progress_sum)
#----------------------------------------------------------------------------------------------
# debt capital
# NOTE(review): indentation reconstructed from context; mirrors the Profitability section.
if 'Debt Capital' in tkpi_output:
    with st.expander('Debt capital'):
        label_index = 'Net Debt/EBITDA', 'EBITDA/Interest', 'Current Ratio', 'Days Payable Outstanding [in days]'
        df_debt_capital = pd.DataFrame()
        # single company: one column per reported fiscal year
        if len(index_list) == 1:
            for y in ticker[index_list[0]].bs.columns:
                ticker[index_list[0]].kpi_debt_capital(y)
                df_debt_capital = fill_df_func(df_debt_capital, ticker, index_list[0], label_index, y)
        # several companies: one column per company for a user-chosen year
        if len(index_list) > 1:
            selected_year = st.selectbox('select year', list_years, key=8)
            for i in index_list:
                if selected_year in ticker[i].bs.columns:
                    ticker[i].kpi_debt_capital(selected_year)
                else:
                    # no report for that year -> NaN-filled KPI vector
                    ticker[i].kpis = np.empty(len(label_index))
                    ticker[i].kpis.fill(np.nan)
                df_debt_capital = fill_df_func(df_debt_capital, ticker, i, label_index, ticker[i].company)
            df_debt_capital = average_func(index_list, df_selected, df_debt_capital)
        st.dataframe(df_debt_capital.style.format("{:.2f}"))
        #progress
        progress += 1
        my_bar.progress(progress/progress_sum)
#----------------------------------------------------------------------------------------------
# equity capital
# NOTE(review): indentation reconstructed from context.
if 'Equity Capital' in tkpi_output:
    with st.expander('Equity capital'):
        # NOTE(review): leading space in ' Forward EPS' preserved from source.
        label_index = 'Revenues per Share', ' Forward EPS', 'Forward Annual Dividend Rate'
        df_equity_capital = pd.DataFrame()
        # per-share KPIs are year-independent here, so no year selector
        for i in index_list:
            ticker[i].kpi_equity_capital()
            df_equity_capital = fill_df_func(df_equity_capital, ticker, i, label_index, ticker[i].company)
        df_equity_capital = average_func(index_list, df_selected, df_equity_capital)
        st.dataframe(df_equity_capital.style.format("{:.2f}"))
        #progress
        progress += 1
        my_bar.progress(progress/progress_sum)
#----------------------------------------------------------------------------------------------
# valuation
# NOTE(review): indentation reconstructed from context.
if 'Valuation' in tkpi_output:
    with st.expander('Valuation'):
        label_index = 'Forward P/E', 'PEG Ratio (5yr expected)', 'P/B Ratio', 'Enterprise Value (EV) [in billion]', 'EV/Revenue', 'EV/EBITDA'
        df_valuation = pd.DataFrame()
        # valuation KPIs are year-independent here, so no year selector
        for i in index_list:
            ticker[i].kpi_valuation()
            df_valuation = fill_df_func(df_valuation, ticker, i, label_index, ticker[i].company)
        df_valuation = average_func(index_list, df_selected, df_valuation)
        st.dataframe(df_valuation.style.format(formatter="{:.2f}"))
        st.write('')
        #progress
        progress += 1
        my_bar.progress(progress/progress_sum)
#----------------------------------------------------------------------------------------------
# capital procurement
# NOTE(review): indentation reconstructed from context; mirrors the Profitability section.
if 'Capital Procurement' in tkpi_output:
    with st.expander('Capital Procurement'):
        label_index = 'Self-Financing Ratio', 'Equity Ratio [in %]'
        df_capital_procurement = pd.DataFrame()
        # single company: one column per reported fiscal year
        if len(index_list) == 1:
            for y in ticker[index_list[0]].bs.columns:
                ticker[index_list[0]].kpi_capital_procurement(y)
                df_capital_procurement = fill_df_func(df_capital_procurement, ticker, index_list[0], label_index, y)
        # several companies: one column per company for a user-chosen year
        if len(index_list) > 1:
            selected_year = st.selectbox('select year', list_years, key=9)
            for i in index_list:
                if selected_year in ticker[i].bs.columns:
                    ticker[i].kpi_capital_procurement(selected_year)
                else:
                    # no report for that year -> NaN-filled KPI vector
                    ticker[i].kpis = np.empty(len(label_index))
                    ticker[i].kpis.fill(np.nan)
                df_capital_procurement = fill_df_func(df_capital_procurement, ticker, i, label_index, ticker[i].company)
            df_capital_procurement = average_func(index_list, df_selected, df_capital_procurement)
        st.dataframe(df_capital_procurement.style.format("{:.2f}"))
        #progress
        progress += 1
        my_bar.progress(progress/progress_sum)
#----------------------------------------------------------------------------------------------
# capital allocation
# NOTE(review): indentation reconstructed from context; mirrors the Profitability section.
if 'Capital Allocation' in tkpi_output:
    with st.expander('Capital Allocation'):
        label_index = 'CapEx/Revenue', 'Research & Development/Revenue', 'Cash Conversion Cycle [in days]'
        df_capital_allocation = pd.DataFrame()
        # single company: one column per reported fiscal year
        if len(index_list) == 1:
            for y in ticker[index_list[0]].bs.columns:
                ticker[index_list[0]].kpi_capital_allocation(y)
                df_capital_allocation = fill_df_func(df_capital_allocation, ticker, index_list[0], label_index, y)
        # several companies: one column per company for a user-chosen year
        if len(index_list) > 1:
            selected_year = st.selectbox('select year', list_years, key=10)
            for i in index_list:
                if selected_year in ticker[i].bs.columns:
                    ticker[i].kpi_capital_allocation(selected_year)
                else:
                    # no report for that year -> NaN-filled KPI vector
                    ticker[i].kpis = np.empty(len(label_index))
                    ticker[i].kpis.fill(np.nan)
                df_capital_allocation = fill_df_func(df_capital_allocation, ticker, i, label_index, ticker[i].company)
            df_capital_allocation = average_func(index_list, df_selected, df_capital_allocation)
        st.dataframe(df_capital_allocation.style.format("{:.2f}"))
        #progress
        progress += 1
        my_bar.progress(progress/progress_sum)
#----------------------------------------------------------------------------------------------
# procurement market
# NOTE(review): indentation reconstructed from context; mirrors the Profitability section.
if 'Procurement Market' in tkpi_output:
    with st.expander('Procurement Market'):
        label_index = 'Labour Productivity [in T per employee]', 'Asset turnover [in %]'
        df_procurement_market = pd.DataFrame()
        # single company: one column per reported fiscal year
        if len(index_list) == 1:
            for y in ticker[index_list[0]].bs.columns:
                ticker[index_list[0]].kpi_procurement_market(y)
                df_procurement_market = fill_df_func(df_procurement_market, ticker, index_list[0], label_index, y)
        # several companies: one column per company for a user-chosen year
        if len(index_list) > 1:
            selected_year = st.selectbox('select year', list_years, key=11)
            for i in index_list:
                if selected_year in ticker[i].bs.columns:
                    ticker[i].kpi_procurement_market(selected_year)
                else:
                    # no report for that year -> NaN-filled KPI vector
                    ticker[i].kpis = np.empty(len(label_index))
                    ticker[i].kpis.fill(np.nan)
                df_procurement_market = fill_df_func(df_procurement_market, ticker, i, label_index, ticker[i].company)
            df_procurement_market = average_func(index_list, df_selected, df_procurement_market)
        st.dataframe(df_procurement_market.style.format("{:.2f}"))
        #progress
        progress += 1
        my_bar.progress(progress/progress_sum)
# all selected sections rendered -> briefly show success and clear progress UI
if progress == progress_sum:
    c3suc_msg=c3.success('Data loading is completed!')
    time.sleep(2)
    my_bar.empty()
    c3suc_msg.empty()
# Optional download of every rendered KPI table as one Excel workbook: all
# tables are stacked on a single "KPI-Dashboard" sheet, each with a caption
# cell written two rows above its table.
# NOTE(review): indentation reconstructed from context — confirm nesting.
if len(tkpi_output)>0:
    #download excel file
    st.markdown("")
    output = BytesIO()
    excel_file = pd.ExcelWriter(output, engine="xlsxwriter")
    if 'Profitability' in tkpi_output:
        df_profitability.to_excel(excel_file, sheet_name='KPI-Dashboard', startrow=3)
        excel_file.sheets['KPI-Dashboard'].write(1, 0, 'Profitability')
    if 'Debt Capital' in tkpi_output:
        df_debt_capital.to_excel(excel_file, sheet_name='KPI-Dashboard', startrow=12)
        # fixed caption typo: was 'Dept Capital'
        excel_file.sheets['KPI-Dashboard'].write(10, 0, 'Debt Capital')
    if 'Equity Capital' in tkpi_output:
        df_equity_capital.to_excel(excel_file, sheet_name='KPI-Dashboard', startrow=20)
        excel_file.sheets['KPI-Dashboard'].write(18, 0, 'Equity Capital')
    if 'Valuation' in tkpi_output:
        df_valuation.to_excel(excel_file, sheet_name='KPI-Dashboard', startrow=27)
        excel_file.sheets['KPI-Dashboard'].write(25, 0, 'Valuation')
    if 'Capital Procurement' in tkpi_output:
        df_capital_procurement.to_excel(excel_file, sheet_name='KPI-Dashboard', startrow=37)
        excel_file.sheets['KPI-Dashboard'].write(35, 0, 'Capital Procurement')
    if 'Capital Allocation' in tkpi_output:
        df_capital_allocation.to_excel(excel_file, sheet_name='KPI-Dashboard', startrow=43)
        excel_file.sheets['KPI-Dashboard'].write(41, 0, 'Capital Allocation')
    if 'Procurement Market' in tkpi_output:
        df_procurement_market.to_excel(excel_file, sheet_name='KPI-Dashboard', startrow=50)
        excel_file.sheets['KPI-Dashboard'].write(48, 0, 'Procurement Market')
    excel_file.save()
    excel_file = output.getvalue()
    # serve the workbook through a base64 data-URI anchor (Streamlit download link)
    b64 = base64.b64encode(excel_file)
    dl_file_name = "Financial Analysis.xlsx"
    st.markdown(
        f"""
        <a href="data:file/excel_file;base64,{b64.decode()}" id="button_dl" download="{dl_file_name}">Download KPI-Dashboard</a>
        """,
        unsafe_allow_html=True)
| 67.525354
| 9,951
| 0.541961
|
912295f351682079d811a627513285af70f638e6
| 3,072
|
py
|
Python
|
bin/gftools-update-families.py
|
hyvyys/gftools
|
85ef924f9307f290be08b15115805cc5e3287d33
|
[
"Apache-2.0"
] | 150
|
2018-07-04T12:53:34.000Z
|
2022-03-31T21:16:10.000Z
|
bin/gftools-update-families.py
|
hyvyys/gftools
|
85ef924f9307f290be08b15115805cc5e3287d33
|
[
"Apache-2.0"
] | 365
|
2018-06-28T19:35:22.000Z
|
2022-03-30T17:07:56.000Z
|
bin/gftools-update-families.py
|
hyvyys/gftools
|
85ef924f9307f290be08b15115805cc5e3287d33
|
[
"Apache-2.0"
] | 53
|
2018-07-20T01:47:44.000Z
|
2022-01-31T16:43:25.000Z
|
#!/usr/bin/env python3
# Copyright 2016 The Font Bakery Authors.
# Copyright 2017 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__="The Google Fonts Tools Authors"
import argparse
import glob
import logging
import os
# set up some command line argument processing
# -v may be repeated (-vv) to raise verbosity; see main() for the mapping.
parser = argparse.ArgumentParser(description="Compare TTF files when upgrading families.")
parser.add_argument('arg_filepaths', nargs='+',
                    help='font file path(s) to check.'
                    ' Wildcards like *.ttf are allowed.')
parser.add_argument('-v', '--verbose', action='count', default=0, help="increase output verbosity")
#=====================================
# Main sequence of checkers & fixers
def main():
# set up a basic logging config
# to include timestamps
# log_format = '%(asctime)s %(levelname)-8s %(message)s'
log_format = '%(levelname)-8s %(message)s '
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
args = parser.parse_args()
if args.verbose == 1:
logger.setLevel(logging.INFO)
elif args.verbose >= 2:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.ERROR)
#------------------------------------------------------
logging.debug("Checking each file is a ttf")
fonts_to_check = []
for arg_filepath in sorted(args.arg_filepaths):
# use glob.glob to accept *.ttf
for fullpath in glob.glob(arg_filepath):
file_path, file_name = os.path.split(fullpath)
if file_name.endswith(".ttf"):
logging.debug("Adding '{}'".format(file_name))
fonts_to_check.append(fullpath)
else:
logging.warning("Skipping '{}' as file is not a ttf".format(file_name))
fonts_to_check.sort()
#------------------------------------------------------
for new_file in fonts_to_check:
logging.debug("Comparison of filesizes")
old_file = new_file + "-old"
new_filesize = os.path.getsize(new_file)
old_filesize = os.path.getsize(old_file)
delta = new_filesize - old_filesize
percentage = float(delta) / old_filesize
if delta>0:
logging.warning("New font file '{}' is {} bytes larger".format(
new_file, delta))
elif delta<0:
logging.warning("New font file '{}' is {} bytes smaller".format(
new_file, -delta))
else:
logging.info("New font file '{}' preserves filesize.".format(new_file))
if __name__=='__main__':
main()
| 35.72093
| 99
| 0.657552
|
91358f34676ca4623ec13cce4ba102738cc821db
| 742
|
py
|
Python
|
lldb/packages/Python/lldbsuite/test/functionalities/thread/concurrent_events/TestConcurrentDelaySignalWatch.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 305
|
2019-09-14T17:16:05.000Z
|
2022-03-31T15:05:20.000Z
|
lldb/packages/Python/lldbsuite/test/functionalities/thread/concurrent_events/TestConcurrentDelaySignalWatch.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 14
|
2020-02-03T23:39:51.000Z
|
2021-07-20T16:24:25.000Z
|
lldb/packages/Python/lldbsuite/test/functionalities/thread/concurrent_events/TestConcurrentDelaySignalWatch.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 24
|
2019-10-03T11:22:11.000Z
|
2022-01-25T09:59:30.000Z
|
import unittest2
from lldbsuite.test.decorators import *
from lldbsuite.test.concurrent_base import ConcurrentEventsBase
from lldbsuite.test.lldbtest import TestBase
@skipIfWindows
class ConcurrentDelaySignalWatch(ConcurrentEventsBase):
    """Concurrency test: one watchpoint thread plus one delayed-signal thread."""

    mydir = ConcurrentEventsBase.compute_mydir(__file__)

    @skipIfFreeBSD  # timing out on buildbot
    # Atomic sequences are not supported yet for MIPS in LLDB.
    @skipIf(triple='^mips')
    @add_test_categories(["watchpoint"])
    def test(self):
        """Test a watchpoint and a (1 second delay) signal in multiple threads."""
        self.build(dictionary=self.getBuildFlags())
        self.do_thread_actions(
            num_delay_signal_threads=1,
            num_watchpoint_threads=1)
| 82
| 0.738544
|
dcb9477eacc34042874ebb3d11b8f0cd6b3f4ee8
| 5,943
|
py
|
Python
|
simple_qt/gui/goto_frame.py
|
zleffke/nexstar
|
8676fe6184b89e1ee0399da207b11e9d9c8d0bb4
|
[
"MIT"
] | null | null | null |
simple_qt/gui/goto_frame.py
|
zleffke/nexstar
|
8676fe6184b89e1ee0399da207b11e9d9c8d0bb4
|
[
"MIT"
] | null | null | null |
simple_qt/gui/goto_frame.py
|
zleffke/nexstar
|
8676fe6184b89e1ee0399da207b11e9d9c8d0bb4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#-- coding: utf-8 --
from PyQt5 import QtCore
from PyQt5 import Qt
from nexstar import *
class goto_frame(Qt.QFrame):
    """Qt frame with GoTo controls for a NexStar mount: target az/el entry,
    GoTo/Cancel buttons, and tracking-mode selection.

    The parent widget supplies the mount-facing callbacks
    (goto_position, cancel_goto, set_tracking_mode, getTrackingMode).
    """

    def __init__(self, parent=None):
        super(goto_frame, self).__init__()
        self.parent = parent  # owner providing mount callbacks
        self.initUI()

    def initUI(self):
        """Build the widget tree and wire up signals."""
        self.setFrameShape(Qt.QFrame.StyledPanel)
        self.init_widgets()
        self.connect_signals()

    def init_widgets(self):
        """Create and lay out all child widgets."""
        # layout constants; lbl_width/val_width are never read below —
        # NOTE(review): candidates for removal.
        lbl_width = 50
        val_width = 125
        lbl_height = 12
        btn_height = 20

        # frame caption
        frame_lbl = Qt.QLabel("GoTo Controls:")
        frame_lbl.setAlignment(Qt.Qt.AlignLeft|Qt.Qt.AlignVCenter)
        frame_lbl.setStyleSheet("QLabel {font:12pt; font-weight:bold; text-decoration: underline; color:rgb(255,0,0);}")
        frame_lbl.setFixedWidth(200)
        frame_lbl.setFixedHeight(20)

        # target azimuth entry
        self.azLabel = Qt.QLabel("Target Azimuth:")
        self.azLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.azLabel.setStyleSheet("QLabel {font:10pt; color:rgb(255,0,0);}")
        self.azTextBox = Qt.QLineEdit()
        self.azTextBox.setText("000.000")
        self.azTextBox.setInputMask("#00.000;")
        self.azTextBox.setEchoMode(Qt.QLineEdit.Normal)
        self.azTextBox.setStyleSheet("QLineEdit {font:10pt; background-color:rgb(200,75,75); color:rgb(0,0,0);}")
        # NOTE(review): max length 3 conflicts with the 7-character
        # "#00.000" mask / default text — confirm the intended limit.
        self.azTextBox.setMaxLength(3)
        self.azTextBox.setFixedWidth(80)
        self.azTextBox.setFixedHeight(20)
        az_hbox = Qt.QHBoxLayout()
        az_hbox.addWidget(self.azLabel)
        az_hbox.addWidget(self.azTextBox)

        # target elevation entry
        self.elLabel = Qt.QLabel("Target Elevation:")
        self.elLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
        self.elLabel.setStyleSheet("QLabel {font:10pt; color:rgb(255,0,0);}")
        self.elTextBox = Qt.QLineEdit()
        self.elTextBox.setText("00.000")
        self.elTextBox.setInputMask("#00.000;")
        self.elTextBox.setEchoMode(Qt.QLineEdit.Normal)
        self.elTextBox.setStyleSheet("QLineEdit {font:10pt; background-color:rgb(200,75,75); color:rgb(0,0,0);}")
        # NOTE(review): same max-length/mask mismatch as azTextBox.
        self.elTextBox.setMaxLength(3)
        self.elTextBox.setFixedWidth(80)
        self.elTextBox.setFixedHeight(20)
        el_hbox = Qt.QHBoxLayout()
        el_hbox.addWidget(self.elLabel)
        el_hbox.addWidget(self.elTextBox)

        # GoTo / Cancel buttons
        self.gotoButton = Qt.QPushButton("GoTo")
        self.gotoButton.setFixedHeight(btn_height)
        self.gotoButton.setStyleSheet("QPushButton { font:10pt; background-color:rgb(200,0,0); }")
        self.cancelButton = Qt.QPushButton("Cancel")
        self.cancelButton.setFixedHeight(btn_height)
        self.cancelButton.setStyleSheet("QPushButton { font:10pt; background-color:rgb(200,0,0); }")
        btn_hbox = Qt.QHBoxLayout()
        btn_hbox.addWidget(self.gotoButton)
        btn_hbox.addWidget(self.cancelButton)

        # tracking-mode radio buttons (OFF / ALT-AZ)
        lbl = Qt.QLabel("Tracking Mode:")
        lbl.setAlignment(Qt.Qt.AlignRight|Qt.Qt.AlignVCenter)
        lbl.setStyleSheet("QLabel {font:10pt; color:rgb(255,0,0);}")
        lbl.setFixedWidth(100)
        lbl.setFixedHeight(lbl_height)
        self.tmRb_off = Qt.QRadioButton("OFF")
        self.tmRb_off.setChecked(True)
        self.tmRb_off.mode = 0  # mode code passed to set_tracking_mode
        self.tmRb_off.setStyleSheet("QRadioButton { font:10pt; color:rgb(200,0,0); }")
        self.tmRb_altaz = Qt.QRadioButton("ALT AZ")
        self.tmRb_altaz.setChecked(False)
        self.tmRb_altaz.mode = 1
        self.tmRb_altaz.setStyleSheet("QRadioButton { font:10pt; color:rgb(200,0,0); }")
        self.getTmButton = Qt.QPushButton("Get Tracking Mode")
        self.getTmButton.setFixedHeight(btn_height)
        self.getTmButton.setStyleSheet("QPushButton { font:10pt; background-color:rgb(200,0,0); }")
        tm_hbox = Qt.QHBoxLayout()
        tm_hbox.addStretch(1)
        tm_hbox.addWidget(lbl)
        tm_hbox.addWidget(self.tmRb_off)
        tm_hbox.addWidget(self.tmRb_altaz)

        # assemble the frame
        vbox = Qt.QVBoxLayout()
        vbox.addWidget(frame_lbl)
        vbox.addLayout(az_hbox)
        vbox.addLayout(el_hbox)
        vbox.addLayout(btn_hbox)
        vbox.addWidget(self.getTmButton)
        vbox.addLayout(tm_hbox)
        self.setLayout(vbox)

    def connect_signals(self):
        """Connect widget signals to the parent's mount callbacks."""
        self.cancelButton.clicked.connect(self.cancelButton_event)
        self.gotoButton.clicked.connect(self.gotoButton_event)
        self.tmRb_off.toggled.connect(self.set_track_mode)
        self.tmRb_altaz.toggled.connect(self.set_track_mode)
        self.getTmButton.clicked.connect(self.getTmButton_event)

    def set_track_mode(self):
        # toggled fires for both buttons; only act on the one now checked
        rb = self.sender()
        if rb.isChecked():
            self.parent.set_tracking_mode(rb.mode)

    def getTmButton_event(self):
        self.parent.getTrackingMode()

    def cancelButton_event(self):
        self.parent.cancel_goto()

    def gotoButton_event(self):
        # read the target angles from the text boxes and start the slew
        self.tar_az = float(self.azTextBox.text())
        self.tar_el = float(self.elTextBox.text())
        self.parent.goto_position(self.tar_az, self.tar_el)

    def increment_target_angle(self, az_el, val):
        #Called by button control frame
        # NOTE(review): assumes tar_az/tar_el already exist, i.e. GoTo was
        # pressed at least once — confirm against the caller.
        if az_el == 'az':
            self.tar_az += val
            self.update_target_azimuth()
        elif az_el == 'el':
            self.tar_el += val
            self.update_target_elevation()
        #self.callback.set_position(self.tar_az, self.tar_el, True)

    def update_target_azimuth(self):
        # clamp azimuth to [-180, 540]
        if self.tar_az < -180.0: self.tar_az = -180.0
        if self.tar_az > 540.0: self.tar_az = 540.0
        #self.az_compass.set_tar_az(self.tar_az)
        #self.az_lcd_fr.set_tar(self.tar_az)
        #self.azTextBox.setText(str(self.tar_az))

    def update_target_elevation(self):
        # clamp elevation to [0, 180]
        if self.tar_el < 0: self.tar_el = 0
        if self.tar_el > 180: self.tar_el = 180
        #self.el_compass.set_tar_el(self.tar_el)
        #self.el_lcd_fr.set_tar(self.tar_el)
        #self.elTextBox.setText(str(self.tar_el))
| 39.62
| 120
| 0.659936
|
3fc4ed575afd56a4dcc20ea5bd196c4097a08511
| 1,550
|
py
|
Python
|
3_estrutura_de_repeticao/43_cardapio_lanchonete.py
|
cecilmalone/lista_de_exercicios_pybr
|
6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5
|
[
"MIT"
] | null | null | null |
3_estrutura_de_repeticao/43_cardapio_lanchonete.py
|
cecilmalone/lista_de_exercicios_pybr
|
6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5
|
[
"MIT"
] | null | null | null |
3_estrutura_de_repeticao/43_cardapio_lanchonete.py
|
cecilmalone/lista_de_exercicios_pybr
|
6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5
|
[
"MIT"
] | null | null | null |
"""
43. O cardápio de uma lanchonete é o seguinte:
Especificação Código Preço
Cachorro Quente 100 R$ 1,20
Bauru Simples 101 R$ 1,30
Bauru com ovo 102 R$ 1,50
Hambúrguer 103 R$ 1,20
Cheeseburguer 104 R$ 1,30
Refrigerante 105 R$ 1,00
Faça um programa que leia o código dos itens pedidos e as quantidades
desejadas.
Calcule e mostre o valor a ser pago por item (preço * quantidade) e o total
geral do pedido.
Considere que o cliente deve informar quando o pedido deve ser encerrado.
"""
# Menu as published in the exercise statement.
cardapio = [{'especificacao': 'Cachorro Quente', 'codigo': 100, 'valor': 1.20},
            {'especificacao': 'Bauru Simples', 'codigo': 101, 'valor': 1.30},
            {'especificacao': 'Bauru com ovo', 'codigo': 102, 'valor': 1.50},
            {'especificacao': 'Hambúrguer', 'codigo': 103, 'valor': 1.20},
            {'especificacao': 'Cheeseburguer', 'codigo': 104, 'valor': 1.30},
            {'especificacao': 'Refrigerante', 'codigo': 105, 'valor': 1.00}]

# Index the menu by item code: the original indexed the *list* with a string
# key (cardapio['espeficacao'] / cardapio['valor']), which raises TypeError.
por_codigo = {produto['codigo']: produto for produto in cardapio}

itens = []
while True:
    # code 0 ends the order; the original also prompted for a quantity for
    # the sentinel and appended it as a bogus order line.
    codigo = int(input("Informe o código do produto: "))
    if codigo == 0:
        break
    if codigo not in por_codigo:
        print("Código inválido.")
        continue
    quantidade = int(input("Informe a quantidade do produto: "))
    itens.append({'codigo': codigo, 'quantidade': quantidade})

total = 0.0
print("Especificação Quantidade Preço")
for item in itens:
    produto = por_codigo[item['codigo']]
    valor_item = item['quantidade'] * produto['valor']
    total += valor_item
    print("{} {} {:.2f}".format(produto['especificacao'],
                                item['quantidade'],
                                valor_item))
# total geral do pedido — required by the exercise, missing in the original
print("Total do pedido: R$ {:.2f}".format(total))
| 37.804878
| 80
| 0.579355
|
ae44aa1313c1bd612e44f5582947192de0c55da7
| 4,056
|
py
|
Python
|
scripts/demo.py
|
jetsunwhitton/pico-ner-relations
|
a11d48f2bd7a8ede73865f13123862262f5227d8
|
[
"Apache-2.0"
] | null | null | null |
scripts/demo.py
|
jetsunwhitton/pico-ner-relations
|
a11d48f2bd7a8ede73865f13123862262f5227d8
|
[
"Apache-2.0"
] | null | null | null |
scripts/demo.py
|
jetsunwhitton/pico-ner-relations
|
a11d48f2bd7a8ede73865f13123862262f5227d8
|
[
"Apache-2.0"
] | null | null | null |
import spacy_streamlit, spacy, operator
import streamlit as st
from spacy import displacy
from spacy.pipeline import merge_entities
# make the factory work
from rel_pipe import make_relation_extractor, score_relations
# make the config work
from rel_model import create_relation_model, create_classification_layer, create_instances, create_tensors
from tabulate import relation_extraction, tabulate_pico_entities
import base64
# set page config
st.set_page_config(
    page_title="RCT-ART",
    page_icon="logo.jpg"
)
st.sidebar.image("logo.jpg")
st.sidebar.markdown("RCT-ART is an NLP pipeline built with spaCy for converting clinical trial result sentences into tables through jointly extracting intervention, outcome and outcome measure entities and their relations. ")
st.sidebar.subheader("Current constraints:")
st.sidebar.markdown("""
- Only abstracts from studies with 2 trial arms
- Must be a sentence with study results
- Sentence must contain at least least one intervention (e.g. drug name), outcome description (e.g. blood pressure) and non-comparative outcome measure)
""")
st.title("Demo")
st.header("Randomised Controlled Trial Abstract Result Tabulator")
# NOTE(review): ner_model is never read — spacy.load below repeats the
# literal path; consider reusing the variable.
ner_model = "trained_models/biobert/ner/all_domains/model-best"
rel_model = "trained_models/biobert/rel/all_domains/model-best"
default_text = "Somnolence , the most frequently reported adverse event , was noted in 72.5 % versus 7.7 % of subjects ( risperidone vs placebo ) and seemed manageable with dose/dose-schedule modification ."
st.subheader("Enter result sentence for analysis")
text = st.text_area("Input should follow constraints outlined in sidebar", default_text, height=200)
# run named-entity recognition on the input sentence
nlp = spacy.load("trained_models/biobert/ner/all_domains/model-best")
ent_doc = nlp(text)
st.subheader("NER analysis")
spacy_streamlit.visualize_ner(
    ent_doc,
    labels=["INTV", "OC", "MEAS"],
    show_table=False,
    title=False
)
# relation extraction over the recognised entities
rel_doc = relation_extraction(rel_model,[ent_doc])[0]
# build a manual displacy dependency-style payload from the predicted relations
deps = {"words": [],"arcs": []}
for tok in rel_doc:
    deps["words"].append({"text": tok.text, "tag": tok.ent_type_})
for key in rel_doc._.rel:
    rel = rel_doc._.rel[key]  # get relation
    pred_rel = max(rel.items(), key=operator.itemgetter(1))  # selects relation type with highest probability
    if pred_rel[1] > 0.5:  # includes relation if above set threshold for probability
        # arc direction depends on token order and whether the head is a MEAS entity
        if key[0] > key[1] and rel_doc[key[1]].ent_type_ != "MEAS":
            deps["arcs"].append({"start": key[1], "end": key[0], "label": pred_rel[0], "dir": "right"})
        elif key[0] > key[1]:
            deps["arcs"].append({"start": key[1], "end": key[0], "label": pred_rel[0], "dir": "left"})
        elif rel_doc[key[1]].ent_type_ != "MEAS":
            deps["arcs"].append({"start": key[0], "end": key[1], "label": pred_rel[0], "dir": "left"})
        else:
            deps["arcs"].append({"start": key[0], "end": key[1], "label": pred_rel[0], "dir": "right"})
html = displacy.render(deps, style="dep", manual=True, options={'distance':80})
st.subheader("RE analysis")
st.write(spacy_streamlit.util.get_svg(html), unsafe_allow_html=True)
# table styling for the final output
heading_properties = [('font-size', '16px')]
cell_properties = [('font-size', '16px')]
dfstyle = [dict(selector="th", props=heading_properties),dict(selector="td", props=cell_properties)]
df = tabulate_pico_entities(rel_doc)
# NOTE(review): debug print left in — consider removing for production.
print(rel_doc._.rel)
#df.style.set_table_styles([cell_hover, index_names, headers])
st.subheader("Tabulation")
st.table(df.style.set_table_styles(dfstyle))
def get_table_download_link(df):
    """Build an HTML anchor that downloads the given dataframe as a CSV file.

    in: dataframe
    out: href string
    """
    csv_text = df.to_csv(index=False)
    # strings <-> bytes round-trip: encode CSV, base64 it, decode for embedding
    encoded = base64.b64encode(csv_text.encode()).decode()
    return (
        f'<a href="data:file/csv;base64,{encoded}" '
        f'download="result_sentence.csv">Download csv file</a>'
    )
# Render the CSV download link for the tabulated results.
st.markdown(get_table_download_link(df), unsafe_allow_html=True)
| 41.387755
| 225
| 0.708826
|
2564c9ff1fd1030d40199b21786eb111492f0536
| 3,008
|
py
|
Python
|
superlists/functional_tests/base.py
|
alvarocneto/TDD_python
|
d9646802f08a18f3aa4743bbe48f8a22eebf07f7
|
[
"MIT"
] | null | null | null |
superlists/functional_tests/base.py
|
alvarocneto/TDD_python
|
d9646802f08a18f3aa4743bbe48f8a22eebf07f7
|
[
"MIT"
] | null | null | null |
superlists/functional_tests/base.py
|
alvarocneto/TDD_python
|
d9646802f08a18f3aa4743bbe48f8a22eebf07f7
|
[
"MIT"
] | null | null | null |
import functools
import os
import time

from django.conf import settings
from django.contrib.auth import (BACKEND_SESSION_KEY, SESSION_KEY,
                                 get_user_model)
from django.contrib.sessions.backends.db import SessionStore
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.keys import Keys
User = get_user_model()  # active Django user model
MAX_WAIT = 10  # seconds: retry budget for the @wait decorator below
def wait(fn):
    """Decorator: call ``fn`` repeatedly until it stops raising.

    Retries every 0.5 s on AssertionError or WebDriverException; once
    MAX_WAIT seconds have elapsed the last exception is re-raised.
    """
    # functools.wraps preserves fn's __name__/__doc__ on the wrapper
    # (the original wrapper reported itself as 'modified_fn').
    @functools.wraps(fn)
    def modified_fn(*args, **kwargs):
        start_time = time.time()
        while True:
            try:
                return fn(*args, **kwargs)
            except (AssertionError, WebDriverException) as e:
                if time.time() - start_time > MAX_WAIT:
                    raise e
                time.sleep(0.5)
    return modified_fn
class FunctionalTest(StaticLiveServerTestCase):
    """Shared base class for the Selenium functional tests."""

    def setUp(self):
        self.browser = webdriver.Firefox()
        # When STAGING_SERVER is set, point the tests at that host
        # instead of the local live-server instance.
        staging_server = os.environ.get('STAGING_SERVER')
        if staging_server:
            self.live_server_url = f'http://{staging_server}'

    def tearDown(self):
        self.browser.quit()

    @wait
    def wait_for_row_in_list_table(self, row_text):
        # Retried by @wait until the row shows up in the to-do table.
        table = self.browser.find_element_by_id('id_list_table')
        rows = table.find_elements_by_tag_name('tr')
        self.assertIn(row_text, [row.text for row in rows])

    @wait
    def wait_for(self, lambda_function):
        # Generic retry helper: keeps calling the passed callable until it
        # stops raising (see the `wait` decorator).
        return lambda_function()

    def get_item_input_box(self):
        return self.browser.find_element_by_id('id_text')

    @wait
    def wait_to_be_logged_in(self, email):
        # Logged-in state: a "Log Out" link exists and the navbar shows the email.
        self.browser.find_element_by_link_text('Log Out')
        navbar = self.browser.find_element_by_css_selector('.navbar')
        self.assertIn(email, navbar.text)

    @wait
    def wait_to_be_logged_out(self, email):
        # Logged-out state: the email login box is back and the navbar no
        # longer shows the email.
        self.browser.find_element_by_name('email')
        navbar = self.browser.find_element_by_css_selector('.navbar')
        self.assertNotIn(email, navbar.text)

    def add_list_item(self, item_text):
        # UI numbering is 1-based and continues from the rows already present.
        num_rows = len(self.browser.find_elements_by_css_selector('#id_list_table tr'))
        self.get_item_input_box().send_keys(item_text)
        self.get_item_input_box().send_keys(Keys.ENTER)
        item_number = num_rows + 1
        self.wait_for_row_in_list_table(f'{item_number}: {item_text}')

    def create_pre_authenticated_session(self, email):
        # Build a server-side session for `email` and plant its cookie in
        # the browser, skipping the login UI entirely.
        user = User.objects.create(email=email)
        session = SessionStore()
        session[SESSION_KEY] = user.pk
        session[BACKEND_SESSION_KEY] = settings.AUTHENTICATION_BACKENDS[0]
        session.save()
        ## to set a cookie we first need to visit the domain;
        ## 404 pages are the ones that load the fastest!
        self.browser.get(self.live_server_url + "/404_no_such_url/")
        self.browser.add_cookie(dict(
            name=settings.SESSION_COOKIE_NAME,
            value=session.session_key,
            path="/",
        ))
| 34.976744
| 87
| 0.674202
|
cd32c2f4b4816f2614ab859a372240546577a239
| 5,640
|
py
|
Python
|
pyprof/prof/misc.py
|
ethem-kinginthenorth/PyProf
|
5430eb3a95f9afc4443eb329f2e831df62cecfe6
|
[
"Apache-2.0"
] | null | null | null |
pyprof/prof/misc.py
|
ethem-kinginthenorth/PyProf
|
5430eb3a95f9afc4443eb329f2e831df62cecfe6
|
[
"Apache-2.0"
] | null | null | null |
pyprof/prof/misc.py
|
ethem-kinginthenorth/PyProf
|
5430eb3a95f9afc4443eb329f2e831df62cecfe6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from utility import Utility
from base import OperatorLayerBase
class Foo(OperatorLayerBase):
    """
    Fallback wrapper instantiated when an unsupported operator is detected.

    Records the marker verbatim and reports zero flops/bytes, so unknown
    ops still appear in the output without contributing cost estimates.
    """

    def __init__(self, d):
        # marker string is produced by the tracer — assumed trusted input
        marker = eval(d.argMarker[0])
        self.marker = marker
        self.mod_ = marker['mod']
        self.op_ = marker['op']
        self.args = marker['args']

        # keep only tensor arguments' shapes/dtypes for the params table
        tensor_args = [a for a in self.args if a['type'] == "tensor"]
        self.shape = [a['shape'] for a in tensor_args]
        self.type = [a['dtype'] for a in tensor_args]

    def params(self):
        return OrderedDict([('T', self.shape), ('type', self.type)])

    def tc(self):
        return "-"

    def op(self):
        return self.op_

    def mod(self):
        return self.mod_

    def flops(self):
        return 0

    def bytes(self):
        return 0
class Copy(OperatorLayerBase):
    """Tensor.copy_: element-wise copy, possibly converting the dtype."""

    def __init__(self, d):
        marker = eval(d.argMarker[0])
        self.marker = marker
        self.mod_ = marker['mod']
        self.op_ = marker['op']
        self.args = marker['args']

        assert (self.mod_ == "Tensor")
        assert (self.op_ == "copy_")
        assert (len(self.args) == 2)
        dst, src = self.args
        # copy_ requires matching argument kinds and shapes
        assert (src['type'] == dst['type'])
        assert (src['shape'] == dst['shape'])
        self.shape = src['shape']
        self.stype = src['dtype']
        self.dtype = dst['dtype']

    def params(self):
        # source and destination dtypes may differ (dtype-converting copy)
        return OrderedDict([('T', self.shape), ('stype', self.stype), ('dtype', self.dtype)])

    def tc(self):
        return "-"

    def op(self):
        return self.op_

    def mod(self):
        return self.mod_

    def flops(self):
        return 0

    def elems(self):
        return Utility.numElems(self.shape)

    def bytes(self):
        # read every source element plus write every destination element
        return self.elems() * (Utility.typeToBytes(self.stype) + Utility.typeToBytes(self.dtype))
class Clone(OperatorLayerBase):
    """Tensor.clone: allocate a new tensor and copy every element into it."""

    def __init__(self, d):
        marker = eval(d.argMarker[0])
        self.marker = marker
        self.mod_ = marker['mod']
        self.op_ = marker['op']
        self.args = marker['args']

        assert (self.mod_ == "Tensor")
        assert (self.op_ == "clone")
        assert (len(self.args) == 1)
        tensor_arg = self.args[0]
        self.shape = tensor_arg['shape']
        self.type = tensor_arg['dtype']

    def params(self):
        return OrderedDict([('T', self.shape), ('type', self.type)])

    def flops(self):
        # pure memory movement, no arithmetic
        return 0

    def tc(self):
        return "-"

    def op(self):
        return self.op_

    def mod(self):
        return self.mod_

    def elems(self):
        return Utility.numElems(self.shape)

    def bytes(self):
        # read the source once plus write the destination once
        return 2 * self.elems() * Utility.typeToBytes(self.type)
class Contiguous(OperatorLayerBase):
    """Tensor.contiguous: materialize a contiguous copy of the tensor."""

    def __init__(self, d):
        marker = eval(d.argMarker[0])
        self.marker = marker
        self.mod_ = marker['mod']
        self.op_ = marker['op']
        self.args = marker['args']

        assert (self.mod_ == "Tensor")
        assert (self.op_ == "contiguous")
        assert (len(self.args) == 1)
        tensor_arg = self.args[0]
        self.shape = tensor_arg['shape']
        self.type = tensor_arg['dtype']

    def params(self):
        return OrderedDict([('T', self.shape), ('type', self.type)])

    def flops(self):
        return 0

    def bytes(self):
        # read the source once plus write the destination once
        return 2 * Utility.numElems(self.shape) * Utility.typeToBytes(self.type)

    def tc(self):
        return "-"

    def op(self):
        return self.op_

    def mod(self):
        return self.mod_
class Any(OperatorLayerBase):
    """Tensor.any: reduction over the input's elements."""

    def __init__(self, d):
        marker = eval(d.argMarker[0])
        self.marker = marker
        self.mod_ = marker['mod']
        self.op_ = marker['op']
        self.args = marker['args']

        assert (self.mod_ == "Tensor")
        assert (self.op_ == "any")
        assert (len(self.args) in [1, 2])
        first = self.args[0]
        # The input can be a tensor or a plain bool scalar
        assert (first['type'] in ["tensor", "bool"])
        if first['type'] == "tensor":
            self.shape = first['shape']
            self.type = first['dtype']
        else:
            # scalar input: model it as a one-element tensor of its own type
            self.shape = (1,)
            self.type = first['type']
        self.sub = d.sub

    def params(self):
        return OrderedDict([('T', self.shape), ('type', self.type)])

    def op(self):
        return self.op_

    def mod(self):
        return self.mod_

    def tc(self):
        return "-"

    def flops(self):
        return 0

    def bytes(self):
        return Utility.numElems(self.shape) * Utility.typeToBytes(self.type)
| 22.56
| 97
| 0.542553
|
ecbf80fdbd8c15bfdbf3d6ddf2b1edebe1d5480d
| 4,246
|
py
|
Python
|
docs/examples/use_cases/paddle/tsm/infer.py
|
cyyever/DALI
|
e2b2d5a061da605e3e9e681017a7b2d53fe41a62
|
[
"ECL-2.0",
"Apache-2.0"
] | 3,967
|
2018-06-19T04:39:09.000Z
|
2022-03-31T10:57:53.000Z
|
docs/examples/use_cases/paddle/tsm/infer.py
|
cyyever/DALI
|
e2b2d5a061da605e3e9e681017a7b2d53fe41a62
|
[
"ECL-2.0",
"Apache-2.0"
] | 3,494
|
2018-06-21T07:09:58.000Z
|
2022-03-31T19:44:51.000Z
|
docs/examples/use_cases/paddle/tsm/infer.py
|
cyyever/DALI
|
e2b2d5a061da605e3e9e681017a7b2d53fe41a62
|
[
"ECL-2.0",
"Apache-2.0"
] | 531
|
2018-06-19T23:53:10.000Z
|
2022-03-30T08:35:59.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from paddle import fluid
import paddle
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.types as types
import nvidia.dali.fn as fn
from nvidia.dali.plugin.paddle import DALIGenericIterator
from tsm import TSM
from utils import load_weights
PRETRAIN_WEIGHTS = 'https://paddlemodels.bj.bcebos.com/video_classification/TSM_final.pdparams'
def create_video_pipe(video_files, sequence_length=8, target_size=224, stride=30):
    """Build a DALI pipeline that decodes videos on the GPU and emits
    cropped, ImageNet-normalized frame sequences in FCHW layout."""
    pipe = Pipeline(1, 4, 0, seed=42)
    with pipe:
        frames = fn.readers.video(
            device="gpu", filenames=video_files,
            sequence_length=sequence_length, stride=stride,
            shard_id=0, num_shards=1, random_shuffle=False,
            pad_last_batch=True, name="Reader")
        frames = fn.crop_mirror_normalize(
            frames,
            dtype=types.FLOAT,
            output_layout="FCHW",
            crop=(target_size, target_size),
            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
        pipe.set_outputs(frames)
    return pipe
def build(seg_num=8, target_size=224):
    """Declare the static-graph 'image' input and return the TSM outputs."""
    data_shape = [seg_num, 3, target_size, target_size]
    feed = fluid.layers.data(name='image', shape=data_shape, dtype='float32')
    return TSM()(feed)
def main():
    """Run TSM inference over every video file under FLAGS.data and print
    the top-k Kinetics label(s) for each one."""
    seg_num = 8
    target_size = 224

    video_files = [FLAGS.data + '/' + f for f in os.listdir(FLAGS.data)]
    pipeline = create_video_pipe(video_files, seg_num, target_size, FLAGS.stride)
    video_loader = DALIGenericIterator(
        pipeline, ['image'], reader_name="Reader", dynamic_shape=True)

    exe = fluid.Executor(fluid.CUDAPlace(0))
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            fetch_list = build(seg_num, target_size)
    exe.run(startup_prog)
    compiled_eval_prog = fluid.CompiledProgram(eval_prog)
    load_weights(exe, eval_prog, PRETRAIN_WEIGHTS)

    # BUGFIX: close the labels file instead of leaking the handle
    # (the old json.load(open(...)) never closed it).
    with open("kinetics_labels.json") as label_file:
        labels = json.load(label_file)
    for idx, batch in enumerate(video_loader):
        fetches = exe.run(
            compiled_eval_prog, feed=batch, fetch_list=fetch_list)
        pred = fetches[0][0]
        # argsort is ascending, so the last k indices are the top-k scores.
        topk_indices = pred.argsort()[0 - FLAGS.topk:]
        topk_labels = [labels[i] for i in topk_indices]
        filename = video_files[idx]
        print("prediction for {} is: {}".format(filename, topk_labels))
if __name__ == '__main__':
    def _parse_args():
        # Command-line interface of the inference script.
        parser = argparse.ArgumentParser(
            description='Paddle Temporal Shift Module Inference')
        parser.add_argument('data', metavar='DIR', help='Path to video files')
        parser.add_argument('--topk', '-k', default=1, type=int,
                            metavar='K', help='Top k results (default: 1)')
        parser.add_argument('--stride', '-s', default=30, type=int, metavar='S',
                            help='Distance between frames (default: 30)')
        return parser.parse_args()

    FLAGS = _parse_args()
    assert FLAGS.data, "error: must provide data path"
    # In PaddlePaddle 2.x dynamic graph mode is on by default, while
    # 'fluid.layers.data()' only exists in static graph mode, so switch
    # to static mode before building the program.
    paddle.enable_static()
    main()
| 38.252252
| 121
| 0.653085
|
40b82c2bfbf3cb2f4cd840fa06e5c1ad4f1746ff
| 10,550
|
py
|
Python
|
trabalho01.py
|
paulazomig/INE5609-DataStructure
|
a1d979784063f7280516604d80793abb7ba69c33
|
[
"MIT"
] | null | null | null |
trabalho01.py
|
paulazomig/INE5609-DataStructure
|
a1d979784063f7280516604d80793abb7ba69c33
|
[
"MIT"
] | null | null | null |
trabalho01.py
|
paulazomig/INE5609-DataStructure
|
a1d979784063f7280516604d80793abb7ba69c33
|
[
"MIT"
] | 1
|
2021-08-24T11:09:05.000Z
|
2021-08-24T11:09:05.000Z
|
class Nodo:
    """Doubly linked list node holding a payload and its neighbor links."""

    def __init__(self, id:int, dado=None, anterior=None, posterior=None):
        self._id = id
        self._dado = dado
        self._anterior = anterior
        self._posterior = posterior

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def dado(self):
        return self._dado

    @dado.setter
    def dado(self, dado):
        self._dado = dado

    @property
    def anterior(self) -> 'Nodo':
        return self._anterior

    @anterior.setter
    def anterior(self, anterior: 'Nodo'):
        self._anterior = anterior

    @property
    def posterior(self) -> 'Nodo':
        return self._posterior

    @posterior.setter
    def posterior(self, posterior: 'Nodo'):
        self._posterior = posterior
class Cursor:
    """Tracks the current node and its 1-based position inside a list."""

    def __init__(self, lista:'ListaEncadeada', nodo_atual:Nodo):
        self.__lista = lista
        self.__atual = nodo_atual
        self.__pos = 0

    @property
    def nodo_atual(self) -> Nodo:
        return self.__atual

    @nodo_atual.setter
    def nodo_atual(self, nodo: Nodo):
        self.__atual = nodo

    @property
    def posicao(self) -> int:
        return self.__pos

    @posicao.setter
    def posicao(self, posicao: int):
        self.__pos = posicao

    def ir_para_o_primeiro(self):
        """Place the cursor on the first real node (tail sentinel if empty)."""
        if self.__lista.esta_vazia():
            self.__pos = 0
            self.__atual = self.__lista.rabo
        else:
            self.__pos = 1
            self.__atual = self.__lista.cabeca.posterior

    def ir_para_o_ultimo(self):
        """Place the cursor on the last real node (head sentinel if empty)."""
        if self.__lista.esta_vazia():
            self.__pos = 0
            self.__atual = self.__lista.cabeca
        else:
            self.__pos = self.__lista.tamanho
            self.__atual = self.__lista.rabo.anterior

    def avancar_k_posicoes(self, k:int):
        """Advance k positions from the current one, clamping at the last node."""
        if self.__lista.esta_vazia():
            return
        if self.__pos + k > self.__lista.tamanho:
            self.ir_para_o_ultimo()
            return
        for _ in range(k):
            self.__pos += 1
            self.__atual = self.__atual.posterior

    def retroceder_k_posicoes(self, k:int):
        """Move k positions back from the current one, clamping at the first node."""
        if self.__lista.esta_vazia():
            return
        if self.__pos - k < 1:
            self.ir_para_o_primeiro()
            return
        for _ in range(k):
            self.__pos -= 1
            self.__atual = self.__atual.anterior
class ListaEncadeada:
    """Doubly linked list with head/tail sentinel nodes and an embedded cursor.

    Real elements always live strictly between the `cabeca` (head) and
    `rabo` (tail) sentinels; the sentinels carry dado=None, which is also
    used as the end-of-scan marker by the traversal methods.
    """
    def __init__(self) -> None:
        self.__conta_id = 0
        self.__tamanho = 0
        # Sentinel nodes; linked directly to each other when the list is empty.
        self.__cabeca = Nodo(id=0)
        self.__rabo = Nodo(id=-1)
        self.__cabeca.posterior = self.__rabo
        self.__rabo.anterior = self.__cabeca
        self.__cursor = Cursor(lista=self, nodo_atual=self.__cabeca)
    @property
    def cabeca(self) -> Nodo:
        return self.__cabeca
    @property
    def rabo(self) -> Nodo:
        return self.__rabo
    @property
    def cursor(self) -> Cursor:
        return self.__cursor
    @property
    def tamanho(self) -> int:
        return self.__tamanho
    def acessar_atual(self) -> Nodo:
        # returns the node currently under the cursor
        return self.__cursor.nodo_atual
    def esta_vazia(self) -> bool:
        # checks whether the list is empty
        return self.__tamanho == 0
    def __len__(self):
        # returns the number of elements in the list
        return self.__tamanho
    def posicao_de(self, dado):
        # returns the 1-based position and the node of a searched value;
        # implicitly returns None when the value is absent
        if not self.contem_dado(dado):
            return
        posicao = 1
        proximo_nodo = self.__cabeca.posterior
        while proximo_nodo.dado != None:
            if proximo_nodo.dado == dado:
                return posicao, proximo_nodo
            else:
                posicao += 1
                proximo_nodo = proximo_nodo.posterior
    def inserir_depois_de(self, dado, atual:Nodo):
        # inserts a new element right after the given node;
        # the cursor then advances one position
        self.__conta_id += 1
        posterior = atual.posterior
        novo_nodo = Nodo(id=self.__conta_id, dado=dado, anterior=atual, posterior=posterior)
        atual.posterior = novo_nodo
        posterior.anterior = novo_nodo
        self.__tamanho += 1
        self.__cursor.avancar_k_posicoes(1)
    def inserir_antes_de(self, dado, atual:Nodo):
        # inserts a new element right before the given node;
        # the cursor then steps one position back
        self.__conta_id += 1
        anterior = atual.anterior
        novo_nodo = Nodo(id=self.__conta_id, dado=dado, anterior=anterior, posterior=atual)
        anterior.posterior = novo_nodo
        atual.anterior = novo_nodo
        self.__tamanho += 1
        self.__cursor.retroceder_k_posicoes(1)
    def inserir_depois_do_atual(self, dado):
        # inserts a new element after the node under the cursor
        atual = self.acessar_atual()
        self.inserir_depois_de(dado, atual)
    def inserir_antes_do_atual(self, dado):
        # inserts a new element before the node under the cursor
        atual = self.acessar_atual()
        self.inserir_antes_de(dado, atual)
    def inserir_por_primero(self, dado):
        # inserts a new element at the beginning of the list
        # (method name keeps the original 'primero' spelling: public API)
        self.__cursor.ir_para_o_primeiro()
        atual = self.acessar_atual()
        self.inserir_antes_de(dado, atual)
    def inserir_por_ultimo(self, dado):
        # inserts a new element at the end of the list
        self.__cursor.ir_para_o_ultimo()
        atual = self.acessar_atual()
        self.inserir_depois_de(dado, atual)
    def inserir_na_posicao(self, posicao:int, dado):
        # inserts a new element at the requested 1-based position, then
        # re-walks from the front so the cursor ends on the new node
        self.__cursor.ir_para_o_primeiro()
        self.__cursor.avancar_k_posicoes(k=posicao-1)
        self.inserir_antes_do_atual(dado)
        self.__cursor.ir_para_o_primeiro()
        self.__cursor.avancar_k_posicoes(k=posicao-1)
    def deletar_nodo(self, nodo:Nodo):
        # unlinks a node and clears its references/payload
        if self.esta_vazia():
            return
        anterior = nodo.anterior
        posterior = nodo.posterior
        anterior.posterior = posterior
        posterior.anterior = anterior
        self.__tamanho -= 1
        nodo.anterior = nodo.posterior = nodo.dado = None
    def deletar_atual(self):
        # deletes the node under the cursor, then resets to the first node
        if self.esta_vazia():
            return
        atual = self.acessar_atual()
        self.deletar_nodo(atual)
        self.__cursor.ir_para_o_primeiro()
    def deletar_primeiro(self):
        # deletes the first node of the list
        if self.esta_vazia():
            return
        self.__cursor.ir_para_o_primeiro()
        self.deletar_atual()
    def deletar_ultimo(self):
        # deletes the last node of the list
        if self.esta_vazia():
            return
        self.__cursor.ir_para_o_ultimo()
        self.deletar_atual()
    def deletar_dado(self, dado):
        # deletes the node whose payload equals the given value
        # NOTE(review): if dado is absent, posicao_de returns None and the
        # tuple unpack below raises TypeError -- confirm intended behavior
        if self.esta_vazia():
            return
        _, nodo = self.posicao_de(dado)
        if nodo:
            self.deletar_nodo(nodo)
            self.__cursor.ir_para_o_primeiro()
    def deletar_da_posicao(self, posicao):
        # deletes the node at the given 1-based position
        self.__cursor.ir_para_o_primeiro()
        self.__cursor.avancar_k_posicoes(k=posicao-1)
        self.deletar_atual()
        self.__cursor.ir_para_o_primeiro()
    def contem_dado(self, dado) -> bool:
        # checks whether the list contains the value (sentinel dado=None
        # terminates the scan)
        proximo_nodo = self.__cabeca.posterior
        while proximo_nodo.dado != None:
            if proximo_nodo.dado == dado:
                return True
            else:
                proximo_nodo = proximo_nodo.posterior
        return False
    def imprimir_lista(self):
        # prints up to the first 11 elements on one line, then a newline
        proximo_nodo = self.__cabeca.posterior
        contador = 0
        while proximo_nodo.dado != None:
            contador += 1
            dado = proximo_nodo.dado
            print(f'[{contador}º: {dado}] ', end='')
            proximo_nodo = proximo_nodo.posterior
            if contador > 10:
                break
        print()
if __name__ == '__main__':
    lista = ListaEncadeada()

    def _mostrar_cursor():
        # Repeated status line of the demo.
        print(f'Cursor atual: {lista.cursor.nodo_atual.dado}, posicao {lista.cursor.posicao}')

    print(f'tamanho da lista: {len(lista)}')
    _mostrar_cursor()
    print()

    # Same insertion sequence as before, expressed as data.
    insercoes = [
        ('Insere 7 em primero', lista.inserir_por_primero, 7),
        ('Insere 6 em primero', lista.inserir_por_primero, 6),
        ('Insere 12 em ultimo', lista.inserir_por_ultimo, 12),
        ('Insere 4 em primero', lista.inserir_por_primero, 4),
        ('Insere -1 em ultimo', lista.inserir_por_ultimo, -1),
    ]
    for mensagem, operacao, valor in insercoes:
        print(mensagem)
        operacao(valor)
        lista.imprimir_lista()
        _mostrar_cursor()
        print()

    print('Movimentar o cursor para primeira posicao')
    lista.cursor.ir_para_o_primeiro()
    _mostrar_cursor()
    print()

    print('Verifica se contem dados na lista')
    print(f'Contem 5: {lista.contem_dado(5)}')
    print(f'Contem 7: {lista.contem_dado(7)}')
    lista.imprimir_lista()
    print()

    print('Insere dado 11 na posicao 3')
    lista.inserir_na_posicao(3, 11)
    _mostrar_cursor()
    lista.imprimir_lista()
    print()

    print('Deletar Nodo da posicao 4')
    lista.deletar_da_posicao(4)
    _mostrar_cursor()
    lista.imprimir_lista()
    print()
| 30.668605
| 92
| 0.622275
|
38d32d3e18274862d02b63e43a2ae970215ae5ac
| 28,677
|
py
|
Python
|
gropylib.py
|
rpkarandev/propylib
|
e2813b045761e43515cdb93859342566bc3edbbb
|
[
"CC0-1.0"
] | null | null | null |
gropylib.py
|
rpkarandev/propylib
|
e2813b045761e43515cdb93859342566bc3edbbb
|
[
"CC0-1.0"
] | null | null | null |
gropylib.py
|
rpkarandev/propylib
|
e2813b045761e43515cdb93859342566bc3edbbb
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 3 10:10:05 2019
functions to handle gromacs output files
@author: karan
"""
import re
from glob import glob
import numpy as np
import pandas as pd
# Standard atomic masses (u) for the elements this library expects.
_ATOMIC_MASSES = {
    "C": 12.0107,
    "H": 1.00794,
    "N": 14.0067,
    "O": 15.9994,
    "P": 30.973762,
    "S": 32.065,
}


def assign_mass(s):
    """Return the atomic mass (u) for element symbol *s*.

    Unknown symbols are reported on stdout and mapped to 0 so that
    downstream mass-weighted sums simply ignore them (same behavior as
    the original if/elif ladder).
    """
    try:
        return _ATOMIC_MASSES[s]
    except KeyError:
        print("unidentified atom: error mass not assigned", s)
        return 0
class coord():
    'Special class for coordinates and related function'

    def __init__(self, d=3):
        # n: number of stored points; dim: dimensionality of each point.
        self.n = 0
        self.dim = d
        # BUGFIX: `np.float` was removed in NumPy 1.24; the builtin float
        # is the documented replacement (same C double precision).
        self.data = np.empty((self.n, self.dim), dtype=float)  # performance tuning may be needed

    def append(self, newcoord):
        """Append a single coordinate (anything reshapeable to (1, dim))."""
        newcoord = np.reshape(newcoord, (1, self.dim))
        self.data = np.append(self.data, newcoord, axis=0)
        self.n += 1

    def extend(self, newcoords):
        """Append several coordinates at once (iterable of dim-vectors)."""
        newcoords = np.array(newcoords)
        self.data = np.append(self.data, newcoords, axis=0)
        self.n += newcoords.shape[0]

    def __getitem__(self, index):
        """Fancy 2-D indexing with a (rows, cols) pair of index lists.

        Lists of builtin bools are converted to the positions of True
        entries. NOTE(review): numpy bool_ values are not `bool`
        instances and would fall through to integer indexing -- confirm
        callers only pass Python bools.
        """
        index1, index2 = index
        if isinstance(index1[0], bool):
            index1 = [x for x in range(len(index1)) if index1[x] == True]
        if isinstance(index2[0], bool):
            index2 = [x for x in range(len(index2)) if index2[x] == True]
        return self.data[np.ix_(index1, index2)]

    def transform(self, tcoord):
        """Translate all stored coordinates by *tcoord* (broadcast add)."""
        self.data = self.data + tcoord

    def __len__(self):
        return self.n
    # include rotation
class atomgrp_o:
    'Structure for storing pdb data of atoms/hetatoms'

    def __init__(self):
        # Per-atom fields kept as parallel Python lists; the coordinates
        # live in a shared `coord` container.
        self.atomno = []
        self.atomname = []
        self.atomtype = []
        self.occupancy = []
        self.bfac = []
        self.resno = []
        self.resname = []
        self.chain = []
        self.coords = coord()

    def add(self, record):
        """Append one atom record:
        (atomno, atomname, resname, chain, resno, xyz, occupancy, bfac, atomtype)."""
        (atomno, atomname, resname, chain, resno,
         xyz, occupancy, bfac, atomtype) = record
        self.atomno.append(atomno)
        self.atomname.append(atomname)
        self.resname.append(resname)
        self.chain.append(chain)
        self.resno.append(resno)
        self.coords.append(xyz)
        self.occupancy.append(occupancy)
        self.bfac.append(bfac)
        self.atomtype.append(atomtype)
class atomgrp:
    'Structure for storing pdb data of atoms/hetatoms'

    def __init__(self):
        # Parallel numpy arrays, one entry per atom.
        self.atomno = np.empty((0), dtype=int)
        self.atomname = np.empty((0), dtype='<U3')
        self.atomtype = np.empty((0), dtype='<U2')
        self.occupancy = np.empty((0), dtype=float)
        self.bfac = np.empty((0), dtype=float)
        self.resno = np.empty((0), dtype=int)
        self.resname = np.empty((0), dtype='<U3')
        self.chain = np.empty((0), dtype='<U2')
        self.coords = coord()

    def add(self, record):
        """Append one atom record:
        (atomno, atomname, resname, chain, resno, xyz, occupancy, bfac, atomtype).
        Note: np.append copies the whole array, so each call is O(n)."""
        (atomno, atomname, resname, chain, resno,
         xyz, occupancy, bfac, atomtype) = record
        self.atomno = np.append(self.atomno, atomno)
        self.atomname = np.append(self.atomname, atomname)
        self.resname = np.append(self.resname, resname)
        self.chain = np.append(self.chain, chain)
        self.resno = np.append(self.resno, resno)
        self.coords.append(xyz)
        self.occupancy = np.append(self.occupancy, occupancy)
        self.bfac = np.append(self.bfac, bfac)
        self.atomtype = np.append(self.atomtype, atomtype)
def read_xvg(filename):
    """Parse a GROMACS .xvg file into a float DataFrame.

    Column names come from the xaxis label and the legend entries; the
    yaxis label(s) and '#' comment lines are returned separately as
    (data, comment, ylabel).
    """
    rows = []
    cols = []
    comment = []
    ylabel = []
    with open(filename, 'r') as infile:
        for line in infile:
            if line.startswith('@'):
                tokens = line.split()
                if tokens[1] == 'xaxis':
                    m = re.search('"(.*)"', line)
                    if m is not None:
                        cols.append(m.groups()[0])
                elif tokens[1] == 'yaxis':
                    m = re.search('"(.*)"', line)
                    if m is not None:
                        ylabel.append(m.groups()[0])
                elif tokens[1][0] == 's' and tokens[2] == 'legend':
                    m = re.search(r'legend\s+"(.*)"', line)
                    if m is not None:
                        cols.append(m.groups()[0])
            elif line.startswith('#'):
                comment.append(line.strip())
            else:
                rows.append(line.split())
    # Pad with numeric names when there are more data columns than labels.
    for i in range(len(rows[0]) - len(cols)):
        cols = cols + [str(i)]
    data = pd.DataFrame(rows, columns=cols).astype('float')
    return data, comment, ylabel
def read_xvg_notnum(filename):
    """Parse a GROMACS .xvg file like read_xvg, but keep every data value
    as a string (no float conversion)."""
    rows = []
    cols = []
    comment = []
    ylabel = []
    with open(filename, 'r') as infile:
        for line in infile:
            if line.startswith('@'):
                tokens = line.split()
                if tokens[1] == 'xaxis':
                    m = re.search('"(.*)"', line)
                    if m is not None:
                        cols.append(m.groups()[0])
                elif tokens[1] == 'yaxis':
                    m = re.search('"(.*)"', line)
                    if m is not None:
                        ylabel.append(m.groups()[0])
                elif tokens[1][0] == 's' and tokens[2] == 'legend':
                    m = re.search(r'legend\s+"(.*)"', line)
                    if m is not None:
                        cols.append(m.groups()[0])
            elif line.startswith('#'):
                comment.append(line.strip())
            else:
                rows.append(line.split())
    # Pad with numeric names when there are more data columns than labels.
    for i in range(len(rows[0]) - len(cols)):
        cols = cols + [str(i)]
    data = pd.DataFrame(rows, columns=cols)
    return data, comment, ylabel
def read_rawxpm(filename):
    """Parse a GROMACS .xpm matrix file without decoding the symbols.

    Returns (data, comment, legenddict, valdict, ylabel): data is a
    DataFrame whose first column holds the x-axis values and whose
    remaining columns (named '<ylabel>_<ytick>') hold the raw one-character
    matrix symbols; legenddict maps symbol -> legend text and valdict maps
    symbol -> colour value string. `comment` is always returned empty.
    """
    data = []
    comment = []
    xlabel = ''
    ylabel = ''
    xaxis = []
    yaxis=[]
    strings = []
    legenddict = {}
    valdict = {}
    with open(filename, 'r') as infile:
        line = infile.readline()
        while line:
            #print(line)
            # Branch order matters: labels must be seen before legends,
            # and legend lines before matrix rows (len(xaxis) toggles it).
            if xlabel == '':
                m = re.search('/\* x-label: "(.*)" \*/', line)
                if m != None :
                    xlabel = m.groups()[0]
            elif ylabel == '':
                m = re.search('/\* y-label: "(.*)" \*/', line)
                if m != None :
                    ylabel = m.groups()[0]
            elif line[0] == '"' and len(xaxis) == 0 :
                # Legend line: symbol, colour value, quoted legend text.
                m = re.search('"(.*?)\s+[c]*\s+(.*)\s+" /\*\s"(.*)"\s\*/', line)
                if m != None:
                    string = m.groups()[0]
                    legenddict[ m.groups()[0] ] = m.groups()[2]
                    valdict[ m.groups()[0] ] = m.groups()[1]
            elif line[:10] == "/* x-axis:":
                m = re.search('/\* x-axis:\s+(.*) \*/', line)
                xaxis.extend( m.groups()[0].strip().split() )
            elif line[:10] == "/* y-axis:":
                m = re.search('/\* y-axis:\s+(.*) \*/', line)
                yaxis.extend( m.groups()[0].strip().split() )
            elif line[0] == '"' and len(xaxis) > 0 :
                # Matrix row: one character per x position.
                m = re.search('"(.*)"', line)
                string = m.groups()[0]
                strings.append( list(string) )
            line = infile.readline()
    # Rows arrive y-major; transpose so each column corresponds to a y tick.
    data = pd.DataFrame(strings, ).T
    data.columns = [ylabel + '_' + y for y in yaxis]
    data[xlabel] = pd.Series(xaxis).astype('float')
    # Move the x column from the last position to the front.
    data = data.loc[:, [ data.columns[-1] ] + data.columns[:-1].values.tolist() ]
    return data, comment, legenddict, valdict, ylabel
def writeNDXfile(grpIndexDic, indexFileName):
    """Write a GROMACS index (.ndx) file.

    One '[ group ]' section per dict key, atom numbers in rows of 10,
    each formatted '%5d '. Returns None.
    """
    per_line = 10
    chunks = []
    for group, atoms in grpIndexDic.items():
        chunks.append(f'[ {group} ]\n')
        for position, atom in enumerate(atoms):
            if position % per_line == 0:
                # Line break before every row (including the first one).
                chunks.append('\n')
            chunks.append(f'{atom:5d} ')
        chunks.append('\n\n')
    with open(indexFileName, 'w') as ofile:
        ofile.write(''.join(chunks))
    return None
def read_numxpm(filename):
    """Read an xpm matrix and replace each raw symbol with its legend text.

    Returns the same 5-tuple as read_rawxpm, or None (after printing an
    error) when nothing was parsed.
    """
    data, comment, legenddict, valdict, ylabel = read_rawxpm(filename)
    if len(data) == 0:
        print("error reading xpm")
        return None
    data.iloc[:, 1:] = data.iloc[:, 1:].apply(
        lambda col: col.apply(lambda sym: legenddict[sym]))
    return data, comment, legenddict, valdict, ylabel
def read_ssxpm(filename):
    """Read a secondary-structure xpm and rename the symbol columns to
    'chN_resM', splitting chains wherever the first row carries a
    'Chain_Separator' legend symbol. Separator columns are dropped.
    """
    data, comment, legenddict, valdict, ylabel = read_rawxpm(filename)
    if len(data) == 0:
        print("error reading xpm")
        return None
    chsep = []
    try:
        chsep = [sym for sym in legenddict.keys()
                 if legenddict[sym] == "Chain_Separator"]
    except:
        print("No chainseparators are found")
    cols = []
    chno = 1
    resno = 1
    # The first data row decides the chain layout for every column.
    for sym in data.iloc[0, 1:]:
        if sym in chsep:
            chno += 1
            resno = 1
            cols.append("chsep")
        else:
            cols.append("ch" + str(chno) + "_res" + str(resno))
            resno += 1
    data.columns = [data.columns[0]] + cols
    data = data.loc[:, data.columns != 'chsep']
    return data, comment, legenddict, valdict, ylabel
def read_gro(filename):
    """Parse a GROMACS .gro coordinate file.

    Returns (data, title, frtime, box): data is a DataFrame with columns
    resno/resname/aname/ano/x/y/z/vx/vy/vz, title is the first line,
    frtime is parsed from a 't=<ps>' tag in the title (0.0 when absent)
    and box holds the final box-vector line as floats. Prints a message
    and returns None when the file cannot be opened or parsed.
    NOTE(review): files without velocity columns fail the float cast and
    therefore also return None -- confirm inputs always carry velocities.
    """
    try:
        with open(filename, 'r') as infile:
            data = []
            line = infile.readline()
            title = line
            m = re.search(r't=([\d.]*)', line)
            frtime = float(m.groups()[0]) if m is not None else 0.0
            line = infile.readline()
            tnmol = int(line.strip())  # declared atom count (not cross-checked)
            line = infile.readline()
            line1 = infile.readline()
            # Fixed-width slicing; one-line look-ahead keeps the final
            # box-vector line out of the atom table.
            while line1:
                data.append([line[:5].strip(), line[5:10].strip(),
                             line[10:15].strip(), line[15:20].strip(),
                             line[20:28].strip(), line[28:36].strip(),
                             line[36:44].strip(), line[44:52].strip(),
                             line[52:60].strip(), line[60:68].strip()])
                line = line1
                line1 = infile.readline()
            box = [float(x) for x in line.strip().split()]
            cols = ['resno', 'resname', 'aname', 'ano',
                    'x', 'y', 'z', 'vx', 'vy', 'vz']
            dtypes = ['int', 'object', 'object', 'int',
                      'float', 'float', 'float', 'float', 'float', 'float']
            data = pd.DataFrame(data, columns=cols).astype(dict(zip(cols, dtypes)))
            return data, title, frtime, box
    except (OSError, ValueError, IndexError):
        # Narrowed from a bare `except:` so programming errors surface.
        print("read_gro: error -> unable to open the file")
class read_pdbtraj:
    'Class for storing PDB data'

    def __init__(self, filename, framestart=0, frameend=0, chainids=[], nresperch=0, verbose=1):
        """Parse a (multi-)MODEL PDB trajectory.

        filename   -- path to the PDB file ('/'-separated; basename kept in fname)
        framestart -- first MODEL index (0-based) to keep
        frameend   -- last MODEL index to keep; < 1 means read to the end
        chainids   -- when non-empty, keep only ATOM records of these chains
        nresperch  -- residues per chain; when > 0 adds resnoc/chainidc/
                      resnoch/m columns to atom_recs
        verbose    -- > 0 prints progress messages

        Static per-atom records are collected from the first kept model
        only; coordinates are collected for every kept model into
        atom_coords with shape (nframes, natoms, 3).

        Raises ValueError for invalid argument types. (The previous code
        did `return -100` from __init__, which itself raises a confusing
        TypeError at runtime.)
        """
        self.fname = ((filename.split('/'))[-1].split('.'))[0]
        # chainids is never mutated, so the mutable default is harmless here.
        if type(chainids) != list:
            raise ValueError("read_pdbtraj: Error chainids must be a list")
        if type(nresperch) != int or nresperch < 0:
            raise ValueError("read_pdbtraj: Enter valid nresperch ")
        if type(framestart) != int or framestart < 0:
            raise ValueError("read_pdbtraj: Enter valid framestart ")
        if type(frameend) != int or frameend < 0:
            raise ValueError("read_pdbtraj: Enter valid frameend ")
        frameend = 10000000 if frameend < 1 else frameend
        nchs_to_get = len(chainids)
        # Public result holders (several are filled only by later analyses).
        self.chains = {}
        self.resnos = []
        self.resnames = []
        self.resnames1 = []
        self.atom = atomgrp()
        self.hetatom = atomgrp()
        self.indCalpha = []
        self.indBB = []
        self.indnH = []
        self.res_many_occurence = []
        modelflag = False
        modelcounter = -1
        readmodelcounter = 0
        atom_recs_l = []
        atom_coords_l = []
        atom_coords_list = []
        hetatom_recs_l = []
        hetatom_coords_l = []
        hetatom_coords_list = []
        framelist = []
        timelist = []
        boxlist = []
        # Defaults in case the file carries no TITLE/CRYST1 before the
        # first MODEL (the previous code hit a NameError in that case).
        frtime = 0.5
        box = [0.0, 0.0, 0.0]
        if verbose > 0:
            print(" framend ", frameend)
        with open(filename, 'r') as pdbfile:
            for line in pdbfile:
                if modelcounter > frameend:
                    break
                if line[:5] == 'TITLE':
                    m = re.search(r't=\s+([0-9\.]*)\s+', line)
                    frtime = float(m.groups()[0].strip()) if m is not None else 0.5
                elif line[:6] == 'CRYST1':
                    m = re.search(r'CRYST1\s+([0-9\.]*)\s+([0-9\.]*)\s+([0-9\.]*)\s+', line)
                    if m is not None:
                        # BUGFIX: all three box lengths used to read group 0;
                        # use groups 0, 1 and 2.
                        box = [float(m.groups()[0].strip()),
                               float(m.groups()[1].strip()),
                               float(m.groups()[2].strip())]
                    else:
                        box = [0.0, 0.0, 0.0]
                elif line[:5] == 'MODEL':
                    modelcounter += 1
                    if framestart <= modelcounter <= frameend:
                        modelflag = True
                        readmodelcounter += 1
                        if verbose > 0:
                            print("read_pdbtraj: reading model ", modelcounter)
                        framelist.append(modelcounter)
                        timelist.append(frtime)
                        boxlist.append(box)
                        # Flush the previous model's coordinates.
                        if atom_coords_l or hetatom_coords_l:
                            atom_coords_list.append(atom_coords_l)
                            atom_coords_l = []
                            hetatom_coords_list.append(hetatom_coords_l)
                            hetatom_coords_l = []
                    else:
                        modelflag = False
                elif modelflag:
                    # BUGFIX: heteroatom records are tagged 'HETATM' in the
                    # PDB format; the old 'HEATOM' literal never matched.
                    # 'ATOM  ' is the full 6-character record name (the old
                    # 5-character comparison could never be equal to line[:6]).
                    if line[:6] == 'ATOM  ' and (nchs_to_get == 0
                                                 or line[20:22].strip() in chainids):
                        if readmodelcounter == 1:
                            # Static fields only from the first kept model.
                            atom_recs_l.append(self._rec_fields(line))
                        atom_coords_l.append(self._rec_xyz(line))
                    elif line[:6] == 'HETATM':
                        if readmodelcounter == 1:
                            hetatom_recs_l.append(self._rec_fields(line))
                        hetatom_coords_l.append(self._rec_xyz(line))
        # BUGFIX: flush the final model (previously the last frame's
        # coordinates were silently dropped).
        if atom_coords_l or hetatom_coords_l:
            atom_coords_list.append(atom_coords_l)
            hetatom_coords_list.append(hetatom_coords_l)
        cols1 = ['atomno', 'atomname', 'resname', 'chain', 'resno', 'occupancy', 'bfac', 'atomtype']
        self.atom_recs = pd.DataFrame(atom_recs_l, columns=cols1)
        self.hetatom_recs = pd.DataFrame(hetatom_recs_l, columns=cols1)
        # (nframes, natoms, 3); ragged frames would yield an object array.
        self.atom_coords = np.array(atom_coords_list)
        self.hetatom_coords = pd.DataFrame(hetatom_coords_list)
        self.framelist = pd.DataFrame()
        self.framelist['frameno'] = framelist
        self.framelist['time'] = timelist
        self.framelist['boxxyz'] = boxlist
        if nresperch > 0:
            # Continuous residue index starting at 0, advancing whenever
            # the PDB residue number changes.
            resnoc_list = []
            resnoc = -1
            resno_old = -100
            for resno in self.atom_recs.resno:
                if resno_old != resno:
                    resnoc += 1
                    resno_old = resno
                resnoc_list.append(resnoc)
            self.atom_recs['resnoc'] = resnoc_list
            self.atom_recs['chainidc'] = self.atom_recs.resnoc.apply(lambda x: int(np.floor(x / nresperch)))
            self.atom_recs['resnoch'] = self.atom_recs.resnoc.apply(lambda x: int(x % nresperch))
            self.atom_recs['m'] = self.atom_recs.atomtype.apply(assign_mass)

    @staticmethod
    def _rec_fields(line):
        # Fixed-column ATOM/HETATM fields: serial, atom name, residue name,
        # chain id, residue number, occupancy, B-factor, element symbol.
        return [int(line[6:11].strip()), line[12:16].strip(), line[17:20].strip(),
                line[21:22].strip(), int(line[22:26].strip()),
                float(line[54:60].strip()), float(line[60:66].strip()),
                line[76:78].strip()]

    @staticmethod
    def _rec_xyz(line):
        # Cartesian coordinates, PDB columns 31-54.
        return [float(line[30:38].strip()), float(line[38:46].strip()),
                float(line[46:54].strip())]
def cal_principalaxes(chcoordsfr, chm):
    """Return (eigvec, com, eigval) of the inertia tensor of coordinates
    chcoordsfr (n, 3) weighted by per-atom masses chm.

    NOTE: chm is expected with shape (n, 1) as used elsewhere in this
    module; the moment sums then broadcast to (n, n) before reduction,
    which is preserved here for compatibility.
    """
    com = (chcoordsfr * chm).sum(axis=0) / chm.sum()
    centered = chcoordsfr - com
    sq = centered * centered
    I11 = (chm * (sq[:, 1] + sq[:, 2])).sum()
    I22 = (chm * (sq[:, 0] + sq[:, 2])).sum()
    I33 = (chm * (sq[:, 0] + sq[:, 1])).sum()
    I12 = -(chm * centered[:, 0] * centered[:, 1]).sum()
    I13 = -(chm * centered[:, 0] * centered[:, 2]).sum()
    I23 = -(chm * centered[:, 1] * centered[:, 2]).sum()
    inertia = np.array([I11, I12, I13, I12, I22, I23, I13, I23, I33]).reshape((3, 3))
    eigval, eigvec = np.linalg.eig(inertia)
    return eigvec, com, eigval
def cal_largestaxis(chcoordsfr, chm):
    """Return (axis, com): the principal axis whose moment of inertia has
    the largest magnitude, together with the centre of mass."""
    eigvec, com, eigval = cal_principalaxes(chcoordsfr, chm)
    dominant = np.argmax(np.abs(eigval))
    return eigvec[:, dominant], com
def get_neighbours(clusters_temp, slist):
    """Return the deduplicated union of the neighbour lists of every
    index in slist."""
    merged = set()
    for s in slist:
        merged.update(clusters_temp[s])
    return list(merged)
def get_clusters(distmat0, distcutoff):
    """Single-linkage clustering on a dense distance matrix.

    Returns (clusters, centers): clusters is a list of index lists whose
    members chain together via pairwise distances <= distcutoff; each
    center is the member with the smallest mean distance to its cluster.
    """
    adjacency = distmat0 <= distcutoff
    pending = list(range(len(adjacency)))
    neighbour_lists = [np.where(adjacency[i, :])[0].tolist() for i in pending]
    clusters = []
    while pending:
        seed = pending.pop(0)
        members = neighbour_lists[seed]
        grown = get_neighbours(neighbour_lists, members)
        # Expand the transitive closure until it stops growing.
        while len(members) != len(grown):
            members = grown.copy()
            grown = get_neighbours(neighbour_lists, members)
        pending = [i for i in pending if i not in grown]
        clusters.append(grown)
    centers = []
    for members in clusters:
        best = np.argmin(np.mean(distmat0[members, :][:, members], axis=1))
        centers.append(members[best])
    return clusters, centers
def get_all_clusters(pairdist, distcutoff ):
    """Per-frame cluster statistics with cumulative size bins.

    pairdist: DataFrame whose columns 1.. hold a flattened N x N pairwise
        distance matrix per row (frame); column 0 is skipped (presumably a
        frame/time index - TODO confirm against the caller).
    distcutoff: distance threshold passed to get_clusters().

    Returns (clus_stat, sizecounts, ncounts):
        clus_stat  - array of [n_clusters, max_size, mean_size] per frame
        sizecounts - DataFrame (frames x bins): number of chains per size bin
        ncounts    - DataFrame (frames x bins): number of clusters per bin
    NOTE: the '9+', '11+' and '13+' bins overlap deliberately - a cluster of
    13 chains is counted in all three of them.
    """
    # Number of chains: the flattened matrix has N*N entries after column 0.
    nch1 = int( np.sqrt( pairdist.shape[1]-1 ) )
    distmats = pairdist.iloc[:,1:].values
    clus_stat = []
    sizecounts = {}
    ncounts = {}
    for x in range( distmats.shape[0] ) :
        sizecount = {'1':0, '2':0, '3-4':0, '5-6':0, '7-8':0, '9+':0, '11+':0,'13+':0}
        ncount = {'1':0, '2':0, '3-4':0, '5-6':0, '7-8':0, '9+':0, '11+':0,'13+':0}
        distmat0 = distmats[x,:].reshape( (nch1, nch1) )
        clusters, clus_centers = get_clusters( distmat0, distcutoff )
        # NOTE(review): the comprehension variable shadows the frame index x.
        clus_lens = [ len(x) for x in clusters ]
        clus_stat.append( [len(clusters), np.max(clus_lens), np.mean(clus_lens) ])
        for cl in clus_lens:
            if cl == 1:
                sizecount['1'] += cl
                ncount['1'] += 1
            elif cl == 2:
                sizecount['2'] += cl
                ncount['2'] += 1
            elif cl in [3,4]:
                sizecount['3-4'] += cl
                ncount['3-4'] += 1
            elif cl in [5,6] :
                sizecount['5-6'] += cl
                ncount['5-6'] += 1
            elif cl in [7,8] :
                sizecount['7-8'] += cl
                ncount['7-8'] += 1
            elif cl > 8 :
                # Cumulative bins: a large cluster increments 9+ and, when
                # applicable, 11+ and 13+ as well.
                sizecount['9+'] += cl
                ncount['9+'] += 1
                if cl > 10 :
                    sizecount['11+'] += cl
                    ncount['11+'] += 1
                if cl > 12 :
                    sizecount['13+'] += cl
                    ncount['13+'] += 1
        sizecounts[x] = sizecount
        ncounts[x] = ncount
    return np.array( clus_stat ), pd.DataFrame( sizecounts ).T, pd.DataFrame( ncounts ).T
def get_all_clusters_update1(pairdist, distcutoff):
    """Per-frame cluster statistics with non-overlapping size bins.

    Same input/output layout as get_all_clusters(), but the size bins
    ('1', '2', '3-8', '9-16', '17-24', '24+') partition the sizes, so each
    cluster is counted exactly once.
    """
    labels = ['1', '2', '3-8', '9-16', '17-24', '24+']
    nch1 = int(np.sqrt(pairdist.shape[1] - 1))
    distmats = pairdist.iloc[:, 1:].values
    clus_stat = []
    sizecounts = {}
    ncounts = {}
    for frame in range(distmats.shape[0]):
        sizecount = dict.fromkeys(labels, 0)
        ncount = dict.fromkeys(labels, 0)
        distmat0 = distmats[frame, :].reshape((nch1, nch1))
        clusters, _centers = get_clusters(distmat0, distcutoff)
        clus_lens = [len(members) for members in clusters]
        clus_stat.append([len(clusters), np.max(clus_lens), np.mean(clus_lens)])
        for cl in clus_lens:
            # Pick the single bin this cluster size falls into.
            if cl == 1:
                bucket = '1'
            elif cl == 2:
                bucket = '2'
            elif cl < 9:
                bucket = '3-8'
            elif cl < 17:
                bucket = '9-16'
            elif cl < 25:
                bucket = '17-24'
            else:
                bucket = '24+'
            sizecount[bucket] += cl   # chains in this bin
            ncount[bucket] += 1       # clusters in this bin
        sizecounts[frame] = sizecount
        ncounts[frame] = ncount
    return np.array(clus_stat), pd.DataFrame(sizecounts).T, pd.DataFrame(ncounts).T
def get_all_clusters_detail(pairdist, distcutoff, frameno=-1 ):
    """Detailed cluster breakdown for a single frame (default: the last).

    Same binning as get_all_clusters() (the '9+'/'11+'/'13+' bins overlap
    deliberately), but also returns the raw clusters and their centers.

    Returns (clusters, clus_centers, clus_lens, clus_stat, sizecount, ncount).
    """
    nch1 = int( np.sqrt( pairdist.shape[1]-1 ) )  # number of chains
    distmats = pairdist.iloc[:,1:].values
    clus_stat = []
    sizecount = {'1':0, '2':0, '3-4':0, '5-6':0, '7-8':0, '9+':0, '11+':0,'13+':0}
    ncount = {'1':0, '2':0, '3-4':0, '5-6':0, '7-8':0, '9+':0, '11+':0,'13+':0}
    ## Take only the last frame
    distmat0 = distmats[frameno,:].reshape( (nch1, nch1) )
    clusters, clus_centers = get_clusters( distmat0, distcutoff )
    clus_lens = [ len(x) for x in clusters ]
    clus_stat.append( [len(clusters), np.max(clus_lens), np.mean(clus_lens) ])
    for cl in clus_lens:
        if cl == 1:
            sizecount['1'] += cl
            ncount['1'] += 1
        elif cl == 2:
            sizecount['2'] += cl
            ncount['2'] += 1
        elif cl in [3,4]:
            sizecount['3-4'] += cl
            ncount['3-4'] += 1
        elif cl in [5,6] :
            sizecount['5-6'] += cl
            ncount['5-6'] += 1
        elif cl in [7,8] :
            sizecount['7-8'] += cl
            ncount['7-8'] += 1
        elif cl > 8 :
            # Cumulative large-size bins (see get_all_clusters).
            sizecount['9+'] += cl
            ncount['9+'] += 1
            if cl > 10 :
                sizecount['11+'] += cl
                ncount['11+'] += 1
            if cl > 12 :
                sizecount['13+'] += cl
                ncount['13+'] += 1
    return clusters, clus_centers, clus_lens, np.array( clus_stat ), sizecount, ncount
| 45.8832
| 249
| 0.481536
|
d32ea621d5d0dd51fdba343f3af6ccd0e65733d1
| 6,690
|
py
|
Python
|
ZipFolder/Fetch Shapefiles.py
|
mattCensus/PerlScripts
|
d2643d99abc3f0647ebfbd41f7e5faa704da3e91
|
[
"MIT"
] | null | null | null |
ZipFolder/Fetch Shapefiles.py
|
mattCensus/PerlScripts
|
d2643d99abc3f0647ebfbd41f7e5faa704da3e91
|
[
"MIT"
] | null | null | null |
ZipFolder/Fetch Shapefiles.py
|
mattCensus/PerlScripts
|
d2643d99abc3f0647ebfbd41f7e5faa704da3e91
|
[
"MIT"
] | null | null | null |
'''
This script will download TIGER data shapefiles from the Census FTP site.
It can be used to download a set of geographies defined in GEO_TYPES_LIST,
or can be used to fetch files for a single state and/or single geography type.
Pass an -s argument to limit by state, pass a -g argument to limit
to a single geography type, and/or pass a -y argument to change the year
from 2012 to something else (e.g. 2015).
>> python fetch_shapefiles.py
>> python fetch_shapefiles.py -s WA
>> python fetch_shapefiles.py -g place
>> python fetch_shapefiles.py -y 2015
>> python fetch_shapefiles.py -s WA -g place -y 2015
If you use the -s argument to fetch files for a single state, the script
will also download the national county, state and congressional district
files that include data for your chosen state.
The script will create DOWNLOAD_DIR and EXTRACT_DIR directories
if necessary, fetch a zipfile or set of zipfiles from the Census website,
then extract the shapefiles from each zipfile retrieved.
DISABLE_AUTO_DOWNLOADS will prevent certain geography types from being
automatically downloaded if no -g argument is passed to fetch_shapefiles.py.
This may be useful because certain files, such as those for Zip Code
Tabulation Areas, are extremely large. You can still target any geography
in GEO_TYPES_LIST specifically, however. So to fetch the ZCTA data:
>> python fetch_shapefiles.py -g zcta5
'''
import optparse
import os
import sys
import zipfile
from os.path import isdir, join, normpath
try:
from six.moves.urllib import request as urllib2
except ImportError:
import urllib2
from __init__ import (DOWNLOAD_DIR, EXTRACT_DIR, STATE_ABBREV_LIST,
GEO_TYPES_LIST, DISABLE_AUTO_DOWNLOADS,
get_fips_code_for_state)
FTP_HOME = 'ftp://ftp2.census.gov/geo/tiger/TIGER2021/'
def get_filename_list_from_ftp(target, state):
    """List the full URLs of every file in the FTP directory *target*.

    If *state* is given, keep only that state's files (matched by FIPS
    code) plus nationwide '_us_' files, except the huge national ZCTA
    archive.

    Returns a list of URL strings.
    """
    target_files = urllib2.urlopen(target).read().splitlines()
    filename_list = ['%s%s' % (target, line.decode().split()[-1])
                     for line in target_files]
    if state:
        state_check = '_%s_' % get_fips_code_for_state(state)
        # Materialize to a list so both branches return the same type;
        # the original returned a lazy `filter` object here on Python 3.
        filename_list = [
            filename for filename in filename_list
            if state_check in filename or
            ('_us_' in filename and '_us_zcta5' not in filename)
        ]
    return filename_list
def get_content_length(u):
    """Content-Length (as int) of a urlopen response, Python 2 or 3."""
    if sys.version_info[0] >= 3:
        return int(u.headers["Content-Length"])
    # Python 2: httplib message object exposes getheader().
    return int(u.info().getheader("Content-Length"))
def download_files_in_list(filename_list, force=False):
    """Download each URL in *filename_list* into DOWNLOAD_DIR.

    Files that already exist locally are skipped unless *force* is set.
    Returns the local path of every file in the list (downloaded or already
    present) so the caller can extract all of them.
    """
    downloaded_filename_list = []
    for file_location in filename_list:
        filename = '%s/%s' % (DOWNLOAD_DIR, file_location.split('/')[-1])
        if force or not os.path.exists(filename):
            # Only download if required.
            u = urllib2.urlopen(file_location)
            try:
                file_size = get_content_length(u)
                print("Downloading: %s Bytes: %s" % (filename, file_size))
                # `with` guarantees the output file is closed even if the
                # transfer fails part-way (the original leaked the handle).
                with open(filename, 'wb') as f:
                    file_size_dl = 0
                    block_sz = 8192
                    while True:
                        buffer = u.read(block_sz)
                        if not buffer:
                            break
                        file_size_dl += len(buffer)
                        f.write(buffer)
                        # Single-line progress indicator (chr(8) backspaces
                        # rewind the cursor so the next status overwrites it).
                        status = r"%10d [%3.2f%%]" % (
                            file_size_dl, file_size_dl * 100. / file_size)
                        status = status + chr(8) * (len(status) + 1)
                        sys.stdout.write(status)
                        sys.stdout.flush()
            finally:
                # Always release the network response.
                u.close()
        downloaded_filename_list.append(filename)
    return downloaded_filename_list
def extract_downloaded_file(filename, remove_on_error=True):
    """Extract the zip archive *filename* into EXTRACT_DIR/<zip basename>.

    A corrupt archive is deleted when *remove_on_error* is set, so that a
    later run re-downloads it; otherwise the original BadZipFile error is
    re-raised.
    """
    zip_dir = filename.replace('.zip', '').split('/')[-1]
    target_dir = normpath(join(EXTRACT_DIR, zip_dir))
    print("Extracting: " + filename + " ...")
    try:
        zipped = zipfile.ZipFile(filename, 'r')
    except zipfile.BadZipFile as ze:
        if remove_on_error:
            os.remove(filename)
            raise Exception(
                "Removed corrupt zip file (%s). Retry download." % filename)
        raise ze
    try:
        # Close the archive even if extraction fails part-way
        # (the original leaked the handle on error).
        zipped.extractall(target_dir)
    finally:
        zipped.close()
def get_one_geo_type(geo_type, state=None, year='2012'):
    """Download and extract all shapefiles of one geography type.

    BUG FIX: the original built the URL with FTP_HOME.replace('2012', year),
    which became a silent no-op once FTP_HOME was bumped to TIGER2021 - the
    -y/--year option had no effect. Replace the four digits following
    'TIGER' instead, whatever they currently are.
    """
    marker = 'TIGER'
    pos = FTP_HOME.find(marker) + len(marker)
    base = FTP_HOME[:pos] + year + FTP_HOME[pos + 4:]
    target = '%s%s/' % (base, geo_type.upper())
    print("Finding files in: " + target + " ...")
    filename_list = get_filename_list_from_ftp(target, state)
    downloaded_filename_list = download_files_in_list(filename_list)
    for filename in downloaded_filename_list:
        extract_downloaded_file(filename)
def get_all_geo_types(state=None, year='2012'):
    """Fetch every geography type except the ones disabled for
    auto-download (e.g. the very large ZCTA archives)."""
    for geo_type in GEO_TYPES_LIST:
        if geo_type in DISABLE_AUTO_DOWNLOADS:
            continue
        get_one_geo_type(geo_type, state, year)
def process_options(arglist=None):
    """Parse command-line options.

    Returns (options, args) and also stores them in the module-level
    `options` / `args` globals, matching the original behaviour.
    """
    global options, args
    parser = optparse.OptionParser()
    parser.add_option('-s', '--state',
                      dest='state', default=None,
                      choices=STATE_ABBREV_LIST,
                      help='specific state to download')
    parser.add_option('-g', '--geo', '--geo_type',
                      dest='geo_type', default=None,
                      choices=GEO_TYPES_LIST,
                      help='specific geographic type to download')
    parser.add_option('-y', '--year',
                      dest='year', default='2012',
                      help='specific year to download')
    options, args = parser.parse_args(arglist)
    return options, args
def main(args=None):
    """Command-line entry point.

    Examples:
      python fetch_shapefiles.py
      python fetch_shapefiles.py -s WA
      python fetch_shapefiles.py -g place
      python fetch_shapefiles.py -s WA -g place
    """
    if args is None:
        args = sys.argv[1:]
    opts, _ = process_options(args)
    # Make sure we have the expected directories.
    for directory in (DOWNLOAD_DIR, EXTRACT_DIR):
        if not isdir(directory):
            os.makedirs(directory)
    # Fetch a single geography type, or everything auto-downloadable.
    if opts.geo_type:
        get_one_geo_type(geo_type=opts.geo_type,
                         state=opts.state,
                         year=opts.year)
    else:
        get_all_geo_types(state=opts.state, year=opts.year)
if __name__ == '__main__':
main()
| 32.955665
| 79
| 0.624664
|
0ac9ddaf8297571d4b89d9f57c35aa4196098fd7
| 13,956
|
py
|
Python
|
eoxserver/services/ows/wps/parameters/complexdata.py
|
ESA-VirES/eoxserver
|
d7b65adf9317538b267d5cbb1281acb72bc0de2c
|
[
"OML"
] | 1
|
2017-11-21T22:23:30.000Z
|
2017-11-21T22:23:30.000Z
|
eoxserver/services/ows/wps/parameters/complexdata.py
|
ESA-VirES/eoxserver
|
d7b65adf9317538b267d5cbb1281acb72bc0de2c
|
[
"OML"
] | null | null | null |
eoxserver/services/ows/wps/parameters/complexdata.py
|
ESA-VirES/eoxserver
|
d7b65adf9317538b267d5cbb1281acb72bc0de2c
|
[
"OML"
] | null | null | null |
#-------------------------------------------------------------------------------
#
# WPS Complex Data type
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
# Martin Paces <martin.paces@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import os
import os.path
import json
from lxml import etree
from copy import deepcopy
from StringIO import StringIO
try:
from cStringIO import StringIO as FastStringIO
except ImportError:
FastStringIO = StringIO
try:
# available in Python 2.7+
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
from .base import Parameter
from .formats import Format
#-------------------------------------------------------------------------------
# complex data - data containers
class CDBase(object):
    """ Base class of the complex data container.

    Format attributes may be passed individually or bundled in a Format
    object via the `format` keyword, in which case the Format's attributes
    take precedence.
    """
    def __init__(self, mime_type=None, encoding=None, schema=None, format=None):
        if isinstance(format, Format):
            mime_type = format.mime_type
            encoding = format.encoding
            schema = format.schema
        self.mime_type = mime_type
        self.encoding = encoding
        self.schema = schema
class CDObject(CDBase):
    """ Complex data wrapper around an arbitrary Python object.

    Used to attach custom format attributes to XML and JSON payloads.
    NOTE: CDObject is not used for the input JSON and XML.
    """
    def __init__(self, data, mime_type=None, encoding=None, schema=None,
                 format=None):
        super(CDObject, self).__init__(mime_type, encoding, schema, format)
        self.data = data
class CDByteBuffer(StringIO, CDBase):
    """ Complex data binary in-memory buffer (StringIO).
        To be used to hold a generic binary (byte-stream) payload.
    """
    def __init__(self, data='', mime_type=None, encoding=None, schema=None,
                 format=None):
        StringIO.__init__(self, str(data))
        CDBase.__init__(self, mime_type, encoding, schema, format)

    def write(self, data):
        # Coerce to a byte string before writing (Python 2 semantics).
        StringIO.write(self, str(data))

    @property
    def data(self):
        # Full payload, read from the beginning of the buffer.
        self.seek(0)
        return self.read()
class CDTextBuffer(StringIO, CDBase):
    """ Complex data text (unicode) in-memory buffer (StringIO).
        To be used to hold generic text. The text payload
        is stored as a unicode-stream.
        Set the 'text_encoding' parameter if unicode encoding/decoding
        shall be applied: write() then decodes byte strings and read()
        encodes the returned text with that codec.
    """
    def __init__(self, data=u'', mime_type=None, encoding=None, schema=None,
                 format=None, text_encoding=None):
        StringIO.__init__(self, unicode(data))
        CDBase.__init__(self, mime_type, encoding, schema, format)
        self.text_encoding = text_encoding

    @property
    def data(self):
        # Full payload, read from the beginning of the buffer.
        self.seek(0)
        return self.read()

    def write(self, data):
        if self.text_encoding is None:
            return StringIO.write(self, unicode(data))
        else:
            # Decode incoming byte strings with the configured codec.
            return StringIO.write(self, unicode(data, self.text_encoding))

    def read(self, size=None):
        if size is None:
            data = StringIO.read(self)
        else:
            data = StringIO.read(self, size)
        if self.text_encoding is None:
            return data
        else:
            # Encode the unicode payload back to bytes on the way out.
            return data.encode(self.text_encoding)
class CDAsciiTextBuffer(CDByteBuffer):
    """ Complex data text (ascii) in-memory buffer (StringIO).
        To be used to hold generic ascii text. The text payload
        is stored as a byte-stream and this class cannot hold
        characters outside of the 7-bit ascii characters' range
        (write() raises UnicodeEncodeError for anything else).
    """
    def __init__(self, data='', mime_type=None, encoding=None, schema=None,
                 format=None, text_encoding=None):
        CDByteBuffer.__init__(self, data, mime_type, encoding, schema, format)
        self.text_encoding = text_encoding

    def write(self, data):
        if not isinstance(data, basestring):
            data = str(data)
        # Enforce the 7-bit ASCII constraint.
        StringIO.write(self, data.encode('ascii'))

    def read(self, size=None):
        if size is None:
            data = StringIO.read(self)
        else:
            data = StringIO.read(self, size)
        if self.text_encoding is None:
            return data
        else:
            # Re-encode with the configured output codec.
            return data.encode(self.text_encoding)
class CDFile(CDBase):
    """ Complex data binary file.
        To be used to hold a generic binary (byte-stream) payload.
        NOTE: The file allows you to specify whether the file is
        temporary (will be automatically removed - by default)
        or permanent (preserved after object destruction).
    """
    def __init__(self, name, mode='r', buffering=-1,
                 mime_type=None, encoding=None, schema=None, format=None,
                 remove_file=True):
        CDBase.__init__(self, mime_type, encoding, schema, format)
        # `open` instead of the Python-2-only `file` builtin.
        self._file = open(name, mode, buffering)
        self._remove_file = remove_file

    def __del__(self):
        # Guard against a partially constructed object (e.g. open() raised
        # in __init__); the original crashed with AttributeError here.
        # Use __dict__ directly so __getattr__ delegation cannot recurse.
        fobj = self.__dict__.get('_file')
        if fobj is None:
            return
        name = fobj.name
        fobj.close()
        if self._remove_file:
            os.remove(name)

    @property
    def data(self):
        """Full payload, read from the beginning of the file."""
        self.seek(0)
        return self.read()

    def __getattr__(self, attr):
        # Delegate everything else (seek, read, name, ...) to the file.
        return getattr(self._file, attr)
class CDPermanentFile(CDFile):
    """ Complex data permanent binary file.
        To be used to hold a generic binary (byte-stream) payload.
        NOTE: This class preserves the actual file.
        NOTE(review): the leading `remove_file` parameter is accepted but
        ignored - the file is always kept; confirm whether callers rely
        on it.
    """
    def __init__(self, remove_file, name, mode='r', buffering=-1,
                 mime_type=None, encoding=None, schema=None, format=None):
        # BUG FIX: the original called CDFile.__init__ without `self`,
        # which raised a TypeError on every instantiation.
        CDFile.__init__(self, name, mode, buffering, mime_type, encoding,
                        schema, format, False)
#-------------------------------------------------------------------------------
class ComplexData(Parameter):
    """ complex-data parameter class """
    def __init__(self, identifier, formats, *args, **kwargs):
        """ Object constructor.
        Parameters:
        identifier identifier of the parameter.
        title optional human-readable name (defaults to identifier).
        abstract optional human-readable verbose description.
        metadata optional metadata (title/URL dictionary).
        optional optional boolean flag indicating whether the input
        parameter is optional or not.
        formats List of supported formats.
        """
        super(ComplexData, self).__init__(identifier, *args, **kwargs)
        # Keyed by (mime_type, encoding, schema); insertion order is kept,
        # so the first declared format acts as the default.
        self.formats = OrderedDict()
        if isinstance(formats, Format):
            formats = (formats,)
        for frm in formats:
            self.formats[(frm.mime_type, frm.encoding, frm.schema)] = frm

    @property
    def default_format(self):
        # First registered format (Python 2 dict-iterator idiom).
        return self.formats.itervalues().next()

    def get_format(self, mime_type, encoding=None, schema=None):
        # None mime_type selects the default; unknown triples yield None.
        if mime_type is None:
            return self.default_format
        else:
            return self.formats.get((mime_type, encoding, schema))

    # def verify_format(self, format):
    #     """ Returns valid format or rise value error exception."""
    #     if format is None:
    #         return self.default_format
    #     tmp = (format.mime_type, format.encoding, format.schema)
    #     if tmp in self.formats:
    #         return format
    #     raise ValueError("Invalid format %r"%format)

    def parse(self, data, mime_type, schema, encoding, **opt):
        """ parse input complex data """
        format_ = self.get_format(mime_type, encoding, schema)
        if format_ is None:
            raise ValueError("Invalid format specification! mime_type=%r, "
                "encoding=%r, schema=%r"%(mime_type, encoding, schema))
        text_encoding = getattr(format_, 'text_encoding', 'utf-8')
        # Format attributes propagated to the produced container.
        fattr = {
            'mime_type': format_.mime_type,
            'encoding': format_.encoding,
            'schema': format_.schema
        }
        if format_.is_xml:
            parsed_data = CDObject(etree.fromstring(data), **fattr)
        elif format_.is_json:
            parsed_data = CDObject(json.loads(_unicode(data, text_encoding)), **fattr)
        elif format_.is_text:
            parsed_data = CDTextBuffer(_unicode(data, text_encoding), **fattr)
            parsed_data.seek(0)
        else: # generic binary byte-stream
            parsed_data = CDByteBuffer(data, **fattr)
            # Apply the format's transfer decoding (e.g. base64) if any.
            if format_.encoding is not None:
                data_out = FastStringIO()
                for chunk in format_.decode(parsed_data, **opt):
                    data_out.write(chunk)
                parsed_data = data_out
            parsed_data.seek(0)
        return parsed_data

    def encode_xml(self, data):
        """ encode complex data to be embedded to an XML document"""
        mime_type = getattr(data, 'mime_type', None)
        encoding = getattr(data, 'encoding', None)
        schema = getattr(data, 'schema', None)
        format_ = self.get_format(mime_type, encoding, schema)
        if format_ is None:
            raise ValueError("Invalid format specification! mime_type=%r, "
                "encoding=%r, schema=%r"%(mime_type, encoding, schema))
        if not format_.allows_xml_embedding:
            raise ValueError("Selected format does not allows XML embedding! "
                "mime_type=%r, encoding=%r, schema=%r"%(
                mime_type, encoding, schema))
        if isinstance(data, CDObject):
            data = data.data
        if format_.is_xml:
            if isinstance(data, etree._ElementTree):
                data = data.getroot()
            # Deep copy so the caller's tree is not re-parented.
            return deepcopy(data)
        elif format_.is_json:
            return json.dumps(data, ensure_ascii=False)
        elif format_.is_text:
            if not isinstance(data, basestring):
                data.seek(0)
                data = data.read()
            return data
        else: # generic binary byte-stream
            # Apply the format's transfer encoding (e.g. base64) if any.
            if format_.encoding is not None:
                data.seek(0)
                data_out = FastStringIO()
                for chunk in format_.encode(data):
                    data_out.write(chunk)
                data = data_out
            data.seek(0)
            return data.read()

    def encode_raw(self, data):
        """ encode complex data for raw output """
        def _rewind(fid):
            # NOTE(review): returns the enclosing `data`, not `fid`; every
            # call site passes `data`, so behaviour is unaffected.
            if hasattr(fid, 'seek'):
                fid.seek(0)
            return data
        mime_type = getattr(data, 'mime_type', None)
        encoding = getattr(data, 'encoding', None)
        schema = getattr(data, 'schema', None)
        format_ = self.get_format(mime_type, encoding, schema)
        # getattr on a possible None format_ is safe: the default applies.
        text_encoding = getattr(format_, 'text_encoding', 'utf-8')
        if format_ is None:
            raise ValueError("Invalid format specification! mime_type=%r, "
                "encoding=%r, schema=%r"%(mime_type, encoding, schema))
        if isinstance(data, CDObject):
            data = data.data
        if format_.is_xml:
            data = FastStringIO(etree.tostring(data, pretty_print=False,
                xml_declaration=True, encoding=text_encoding))
            content_type = "%s; charset=%s"%(format_.mime_type, text_encoding)
        elif format_.is_json:
            data = FastStringIO(json.dumps(data, ensure_ascii=False).encode(text_encoding))
            content_type = "%s; charset=%s"%(format_.mime_type, text_encoding)
        elif format_.is_text:
            if isinstance(data, (CDTextBuffer, CDAsciiTextBuffer)):
                # The buffer re-encodes on read; just set the target codec.
                data.text_encoding = text_encoding
            else:
                data = FastStringIO(_rewind(data).read().encode(text_encoding))
            content_type = "%s; charset=%s"%(format_.mime_type, text_encoding)
        else: # generic binary byte-stream
            # Apply the format's transfer encoding (e.g. base64) if any.
            if format_.encoding is not None:
                data_out = FastStringIO()
                for chunk in format_.encode(_rewind(data)):
                    data_out.write(chunk)
                data = data_out
            content_type = format_.mime_type
        return _rewind(data), content_type
def _bytestring(data):
if isinstance(data, str):
return data
raise TypeError("Byte string expected, %s received!"%type(data))
def _unicode(data, encoding):
    """Return *data* as a unicode string.

    Byte strings are decoded with *encoding*; any other type raises
    TypeError. (Python 2 only: relies on the `unicode` builtin.)
    """
    if isinstance(data, unicode):
        return data
    elif isinstance(data, str):
        return unicode(data, encoding)
    # Typo fix: the original message read "Byte od unicode string".
    raise TypeError("Byte or unicode string expected, %s received!" % type(data))
| 37.923913
| 91
| 0.602178
|
4b567eca852dd7e2aa70a46416b97222852e8651
| 292
|
py
|
Python
|
onlinecourse/migrations/0003_delete_question.py
|
spacemc2/ProjectFinalDjango
|
0754120fd1ac742582acafdd77ed66026ab90104
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/migrations/0003_delete_question.py
|
spacemc2/ProjectFinalDjango
|
0754120fd1ac742582acafdd77ed66026ab90104
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/migrations/0003_delete_question.py
|
spacemc2/ProjectFinalDjango
|
0754120fd1ac742582acafdd77ed66026ab90104
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1.3 on 2022-01-07 02:59
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the Question model introduced in migration 0002_question."""

    dependencies = [
        ('onlinecourse', '0002_question'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Question',
        ),
    ]
| 17.176471
| 47
| 0.599315
|
5cf1d1c6ed77517db3b301c5517161288f9c0892
| 3,621
|
py
|
Python
|
fyplot/dict_plot.py
|
intelligent-soft-robots/fyplot
|
642ba620105ee2a177cd82d147561d235bf30ad6
|
[
"BSD-3-Clause"
] | null | null | null |
fyplot/dict_plot.py
|
intelligent-soft-robots/fyplot
|
642ba620105ee2a177cd82d147561d235bf30ad6
|
[
"BSD-3-Clause"
] | null | null | null |
fyplot/dict_plot.py
|
intelligent-soft-robots/fyplot
|
642ba620105ee2a177cd82d147561d235bf30ad6
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright(c) 2020 Max Planck Gesellschaft
# Author: Vincent Berenz
from pyqtgraph.Qt import QtGui, QtCore, QtWidgets
import pyqtgraph as pg
from collections import deque
import threading,math,time
pg.setConfigOptions(antialias=True)
class Config:
    """Declarative description of a plotting session.

    Attributes:
        channels      channel names, as a flat list or list of lists (rows)
        slots         channel -> list of slot (curve) names
        slots_colors  slot name -> pen/color passed to pyqtgraph's plot()
        data_size     number of samples buffered per slot
        title         window title
        windows_size  [width, height] of the window in pixels
        start         flipped to True by _init() once the plots exist
        limits        optional channel -> [ymin, ymax] initial range
    """

    def __init__(self):
        # What to plot.
        self.channels = []
        self.slots = {}
        self.slots_colors = {}
        # How much history to keep.
        self.data_size = 200
        # Window appearance.
        self.title = "untitled"
        self.windows_size = [800, 800]
        # Runtime state / optional ranges.
        self.start = False
        self.limits = {}
# Module-level Qt/pyqtgraph state shared by all plotting functions.
_APPLICATION = QtGui.QApplication([])  # the single Qt application instance
_WIN = pg.GraphicsWindow(title="pasta")
_WIN.resize(1,1)  # start tiny; resized from the Config in _init()
_WIN.setWindowTitle("")
_FIRST_ITERATION = True  # lets _update_plot freeze auto-range after one draw
_CHANNELS = {}  # channel name -> _channel_data buffer
_PLOTS = {} # key : channel , value : {slot:(plot,curve)}
_CONFIG = None  # the active Config instance (set by start_plotting)
class _channel_data:
def __init__(self,slots,data_size,y_range=[-math.pi,+math.pi]):
self.data = {slot:deque([0 for _ in range(data_size)],data_size) for slot in slots }
for slot in slots:
self.data[slot][-2] = y_range[0]
self.data[slot][-1] = y_range[1]
self.lock = threading.Lock()
def update(self,values):
with self.lock:
for slot,value in values.items():
self.data[slot].append(value)
def get(self):
with self.lock:
r = {}
for slot,queue in self.data.items():
r[slot] = [v for v in queue]
return r
def _init():
    """Build all plots/curves described by _CONFIG and wire the refresh timer."""
    # `timer` is the module-level QTimer defined further down; connecting it
    # here delays the first redraw until the plots actually exist.
    timer.timeout.connect(_update_plot)
    global _CONFIG,_CHANNELS,_PLOTS,_WIN
    _WIN.resize(_CONFIG.windows_size[0],_CONFIG.windows_size[1])
    _WIN.setWindowTitle(_CONFIG.title)
    # Accept a flat list of channel names as a single row of subplots.
    if isinstance(_CONFIG.channels[0],str):
        _CONFIG.channels = [_CONFIG.channels]
    all_channels = []
    for channel_set in _CONFIG.channels:
        all_channels.extend(channel_set)
    def _get_y_range(channel):
        # Optional per-channel y-limits; None when not configured.
        try : return _CONFIG.limits[channel]
        except : return None
    _CHANNELS = {channel:_channel_data(_CONFIG.slots[channel],
                                       _CONFIG.data_size,
                                       y_range=_get_y_range(channel))
                 for channel in all_channels}
    # One subplot per channel, one curve per slot; each channel set is a row.
    for channel_set in _CONFIG.channels:
        for channel in channel_set:
            p = _WIN.addPlot(title=channel)
            # NOTE(review): `data` is never used below - leftover?
            data = _CHANNELS[channel].get()
            curves = {}
            for slot in _CONFIG.slots[channel]:
                curve = p.plot(pen=_CONFIG.slots_colors[slot],name=slot)
                curves[slot]=(p,curve)
            _PLOTS[channel]=curves
        _WIN.nextRow()
    # Signal _update_plot that drawing may begin.
    _CONFIG.start = True
def set_data(data):
    """Push new samples into the buffers.

    data: {channel: {slot: value}} - one new value per slot.
    """
    for channel, samples in data.items():
        _CHANNELS[channel].update(samples)
def _update_plot():
    """Timer callback: push the latest buffered samples into every curve."""
    # NOTE(review): `win` is never defined in this module - leftover?
    global _CHANNELS,_FIRST_ITERATION,_PLOTS,win
    # Nothing to draw until _init() has finished building the plots.
    if not _CONFIG.start : return
    for channel in _CHANNELS.keys():
        curves = _PLOTS[channel]
        data = _CHANNELS[channel].get()
        for slot in _CONFIG.slots[channel]:
            plot,curve = curves[slot]
            d = data[slot]
            curve.setData(d)
            # Freeze auto-ranging after the first draw so the view is stable.
            if _FIRST_ITERATION : plot.enableAutoRange('xy', False)
    _FIRST_ITERATION = False
# Periodic GUI refresh: fires every 50 ms; _init() connects the timeout
# signal to _update_plot.
timer = QtCore.QTimer()
timer.start(50)
def _start(target_function):
    """Build the plot window, then run the user's code in a new thread."""
    _init()
    worker = threading.Thread(target=target_function)
    worker.start()
def start_plotting(config,target_function):
    """Entry point: install *config*, start plotting plus the worker
    thread running *target_function*, then enter the Qt event loop."""
    global _CONFIG
    _CONFIG = config
    _start(target_function)
    import sys
    # Run the Qt main loop only when not inside an interactive PyQt session.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| 25.864286
| 92
| 0.605634
|
a24937bab183e54df40e931f2f4621ec22ea83c4
| 18,011
|
py
|
Python
|
src/test/py/bazel/test_wrapper_test.py
|
sevki/bazel
|
b18915752a69fbbc6ed94e1710198167593565fc
|
[
"Apache-2.0"
] | null | null | null |
src/test/py/bazel/test_wrapper_test.py
|
sevki/bazel
|
b18915752a69fbbc6ed94e1710198167593565fc
|
[
"Apache-2.0"
] | null | null | null |
src/test/py/bazel/test_wrapper_test.py
|
sevki/bazel
|
b18915752a69fbbc6ed94e1710198167593565fc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import zipfile
from src.test.py.bazel import test_base
class TestWrapperTest(test_base.TestBase):
@staticmethod
def _ReadFile(path):
# Read the runfiles manifest.
contents = []
with open(path, 'rt') as f:
contents = [line.strip() for line in f.readlines()]
return contents
def _FailWithOutput(self, output):
self.fail('FAIL:\n | %s\n---' % '\n | '.join(output))
  def _CreateMockWorkspace(self):
    """Writes the scratch workspace, BUILD file, and all test scripts/data
    used by the test wrapper tests."""
    self.ScratchFile('WORKSPACE')
    # All test targets are called <something>.bat, for the benefit of Windows.
    # This makes test execution faster on Windows for the following reason:
    #
    # When building a sh_test rule, the main output's name is the same as the
    # rule. On Unixes, this output is a symlink to the main script (the first
    # entry in `srcs`), on Windows it's a copy of the file. In fact the main
    # "script" does not have to be a script, it may be any executable file.
    #
    # On Unixes anything with the +x permission can be executed; the file's
    # shebang line specifies the interpreter. On Windows, there's no such
    # mechanism; Bazel runs the main script (which is typically a ".sh" file)
    # through Bash. However, if the main file is a native executable, it's
    # faster to run it directly than through Bash (plus it removes the need for
    # Bash).
    #
    # Therefore on Windows, if the main script is a native executable (such as a
    # ".bat" file) and has the same extension as the main file, Bazel (in case
    # of sh_test) makes a copy of the file and runs it directly, rather than
    # through Bash.
    self.ScratchFile('foo/BUILD', [
        'sh_test(',
        ' name = "passing_test.bat",',
        ' srcs = ["passing.bat"],',
        ')',
        'sh_test(',
        ' name = "failing_test.bat",',
        ' srcs = ["failing.bat"],',
        ')',
        'sh_test(',
        ' name = "printing_test.bat",',
        ' srcs = ["printing.bat"],',
        ')',
        'sh_test(',
        ' name = "runfiles_test.bat",',
        ' srcs = ["runfiles.bat"],',
        ' data = ["passing.bat"],',
        ')',
        'sh_test(',
        ' name = "sharded_test.bat",',
        ' srcs = ["sharded.bat"],',
        ' shard_count = 2,',
        ')',
        'sh_test(',
        ' name = "unexported_test.bat",',
        ' srcs = ["unexported.bat"],',
        ')',
        'sh_test(',
        ' name = "testargs_test.bat",',
        ' srcs = ["testargs.bat"],',
        ' args = ["foo", "a b", "", "bar"],',
        ')',
        'py_test(',
        ' name = "undecl_test",',
        ' srcs = ["undecl_test.py"],',
        ' data = ["dummy.ico", "dummy.dat"],',
        ' deps = ["@bazel_tools//tools/python/runfiles"],',
        ')',
        'py_test(',
        ' name = "annot_test",',
        ' srcs = ["annot_test.py"],',
        ')',
    ])
    # Trivial always-pass / always-fail targets.
    self.ScratchFile('foo/passing.bat', ['@exit /B 0'], executable=True)
    self.ScratchFile('foo/failing.bat', ['@exit /B 1'], executable=True)
    # Echoes the test environment variables checked by _AssertPrintingTest.
    self.ScratchFile(
        'foo/printing.bat', [
            '@echo lorem ipsum',
            '@echo HOME=%HOME%',
            '@echo TEST_SRCDIR=%TEST_SRCDIR%',
            '@echo TEST_TMPDIR=%TEST_TMPDIR%',
            '@echo USER=%USER%',
        ],
        executable=True)
    # Echoes the runfiles-related environment variables.
    self.ScratchFile(
        'foo/runfiles.bat', [
            '@echo MF=%RUNFILES_MANIFEST_FILE%',
            '@echo ONLY=%RUNFILES_MANIFEST_ONLY%',
            '@echo DIR=%RUNFILES_DIR%',
        ],
        executable=True)
    # Echoes the sharding protocol variables.
    self.ScratchFile(
        'foo/sharded.bat', [
            '@echo STATUS=%TEST_SHARD_STATUS_FILE%',
            '@echo INDEX=%TEST_SHARD_INDEX% TOTAL=%TEST_TOTAL_SHARDS%',
        ],
        executable=True)
    # Checks which variables the wrapper exports to the test process.
    self.ScratchFile(
        'foo/unexported.bat', [
            '@echo GOOD=%HOME%',
            '@echo BAD=%TEST_UNDECLARED_OUTPUTS_MANIFEST%',
        ],
        executable=True)
    self.ScratchFile(
        'foo/testargs.bat',
        [
            '@echo arg=(%~nx0)', # basename of $0
            '@echo arg=(%1)',
            '@echo arg=(%2)',
            '@echo arg=(%3)',
            '@echo arg=(%4)',
            '@echo arg=(%5)',
            '@echo arg=(%6)',
            '@echo arg=(%7)',
            '@echo arg=(%8)',
            '@echo arg=(%9)',
        ],
        executable=True)
    # A single white pixel as an ".ico" file. /usr/bin/file should identify this
    # as "image/x-icon".
    # The MIME type lookup logic of the test wrapper only looks at file names,
    # but the test-setup.sh calls /usr/bin/file which inspects file contents, so
    # we need a valid ".ico" file.
    ico_file = bytearray([
        0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x00,
        0x18, 0x00, 0x30, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x28, 0x00,
        0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00,
        0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00
    ])
    # 16 bytes of random data. /usr/bin/file should identify this as
    # "application/octet-stream".
    # The MIME type lookup logic of the test wrapper only looks at file names,
    # but the test-setup.sh calls /usr/bin/file which inspects file contents, so
    # we need a valid ".ico" file.
    dat_file = bytearray([
        0x40, 0x5a, 0x2e, 0x7e, 0x53, 0x86, 0x98, 0x0e, 0x12, 0xc4, 0x92, 0x38,
        0x27, 0xcd, 0x09, 0xf9
    ])
    # Write the binary fixtures with Windows-style paths.
    ico_file_path = self.ScratchFile('foo/dummy.ico').replace('/', '\\')
    dat_file_path = self.ScratchFile('foo/dummy.dat').replace('/', '\\')
    with open(ico_file_path, 'wb') as f:
      f.write(ico_file)
    with open(dat_file_path, 'wb') as f:
      f.write(dat_file)
    # Test that creates undeclared outputs under TEST_UNDECLARED_OUTPUTS_DIR.
    self.ScratchFile(
        'foo/undecl_test.py', [
            'from bazel_tools.tools.python.runfiles import runfiles',
            'import os',
            'import shutil',
            '',
            'root = os.environ.get("TEST_UNDECLARED_OUTPUTS_DIR")',
            'os.mkdir(os.path.join(root, "out1"))',
            'os.mkdir(os.path.join(root, "out2"))',
            'os.makedirs(os.path.join(root, "empty/sub"))',
            'r = runfiles.Create()',
            'shutil.copyfile(r.Rlocation("__main__/foo/dummy.ico"),',
            ' os.path.join(root, "out1", "data1.ico"))',
            'shutil.copyfile(r.Rlocation("__main__/foo/dummy.dat"),',
            ' os.path.join(root, "out2", "data2.dat"))',
        ],
        executable=True)
    # Test that writes undeclared-output annotations (".part" files).
    self.ScratchFile(
        'foo/annot_test.py', [
            'import os',
            'root = os.environ.get("TEST_UNDECLARED_OUTPUTS_ANNOTATIONS_DIR")',
            'dir1 = os.path.join(root, "out1")',
            'dir2 = os.path.join(root, "out2.part")',
            'os.mkdir(dir1)',
            'os.mkdir(dir2)',
            'with open(os.path.join(root, "a.part"), "wt") as f:',
            ' f.write("Hello a")',
            'with open(os.path.join(root, "b.txt"), "wt") as f:',
            ' f.write("Hello b")',
            'with open(os.path.join(root, "c.part"), "wt") as f:',
            ' f.write("Hello c")',
            'with open(os.path.join(dir1, "d.part"), "wt") as f:',
            ' f.write("Hello d")',
            'with open(os.path.join(dir2, "e.part"), "wt") as f:',
            ' f.write("Hello e")',
        ],
        executable=True)
def _AssertPassingTest(self, flag):
    """Runs //foo:passing_test.bat and asserts that it passes."""
    args = ['test', '//foo:passing_test.bat', '-t-', flag]
    exit_code, _, stderr = self.RunBazel(args)
    self.AssertExitCode(exit_code, 0, stderr)
def _AssertFailingTest(self, flag):
    """Runs //foo:failing_test.bat and asserts that Bazel reports a test failure."""
    args = ['test', '//foo:failing_test.bat', '-t-', flag]
    exit_code, _, stderr = self.RunBazel(args)
    # Exit code 3: build succeeded but some tests failed.
    self.AssertExitCode(exit_code, 3, stderr)
def _AssertPrintingTest(self, flag):
    """Runs //foo:printing_test.bat and validates the environment it reports.

    The test is expected to print a "lorem ipsum" marker plus HOME,
    TEST_SRCDIR, TEST_TMPDIR and USER; each must be present and point at a
    sensible location.
    """
    exit_code, stdout, stderr = self.RunBazel([
        'test',
        '//foo:printing_test.bat',
        '-t-',
        '--test_output=all',
        flag,
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    lorem = False
    # Fix: initialize every scraped value up front. The original left
    # home/srcdir/tmpdir/user unbound, so a missing output line raised
    # NameError (or passed an unbound name to os.path) instead of producing
    # a clean test failure with diagnostic output.
    home = srcdir = tmpdir = user = None
    for line in stderr + stdout:
        if line.startswith('lorem ipsum'):
            lorem = True
        elif line.startswith('HOME='):
            home = line[len('HOME='):]
        elif line.startswith('TEST_SRCDIR='):
            srcdir = line[len('TEST_SRCDIR='):]
        elif line.startswith('TEST_TMPDIR='):
            tmpdir = line[len('TEST_TMPDIR='):]
        elif line.startswith('USER='):
            user = line[len('USER='):]
    if not lorem:
        self._FailWithOutput(stderr + stdout)
    if not home:
        self._FailWithOutput(stderr + stdout)
    if not os.path.isabs(home):
        self._FailWithOutput(stderr + stdout)
    if not srcdir or not os.path.isdir(srcdir):
        self._FailWithOutput(stderr + stdout)
    if not os.path.isfile(os.path.join(srcdir, 'MANIFEST')):
        self._FailWithOutput(stderr + stdout)
    if not os.path.isabs(srcdir):
        self._FailWithOutput(stderr + stdout)
    if not tmpdir or not os.path.isdir(tmpdir):
        self._FailWithOutput(stderr + stdout)
    if not os.path.isabs(tmpdir):
        self._FailWithOutput(stderr + stdout)
    if not user:
        self._FailWithOutput(stderr + stdout)
def _AssertRunfiles(self, flag):
    """Runs //foo:runfiles_test.bat with runfiles-tree creation disabled.

    With --enable_runfiles=no the test must fall back to manifest-based
    runfiles discovery: the manifest-only marker is "1", the manifest file
    exists and lists the data dependency, and the runfiles directory exists.
    """
    exit_code, stdout, stderr = self.RunBazel([
        'test',
        '//foo:runfiles_test.bat',
        '-t-',
        '--test_output=all',
        # Ensure Bazel does not create a runfiles tree.
        '--enable_runfiles=no',
        flag,
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    # Scrape manifest path (MF=), manifest-only marker (ONLY=), and runfiles
    # directory (DIR=) from the test's output.
    mf = mf_only = rf_dir = None
    for line in stderr + stdout:
        if line.startswith('MF='):
            mf = line[len('MF='):]
        elif line.startswith('ONLY='):
            mf_only = line[len('ONLY='):]
        elif line.startswith('DIR='):
            rf_dir = line[len('DIR='):]
    if mf_only != '1':
        self._FailWithOutput(stderr + stdout)
    if not os.path.isfile(mf):
        self._FailWithOutput(stderr + stdout)
    mf_contents = TestWrapperTest._ReadFile(mf)
    # Assert that the data dependency is listed in the runfiles manifest.
    if not any(
        line.split(' ', 1)[0].endswith('foo/passing.bat')
        for line in mf_contents):
        self._FailWithOutput(mf_contents)
    if not os.path.isdir(rf_dir):
        self._FailWithOutput(stderr + stdout)
def _AssertShardedTest(self, flag):
    """Runs //foo:sharded_test.bat and checks shard status file and indices."""
    exit_code, stdout, stderr = self.RunBazel([
        'test',
        '//foo:sharded_test.bat',
        '-t-',
        '--test_output=all',
        flag,
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    output = stderr + stdout
    status_values = [l[len('STATUS='):] for l in output if l.startswith('STATUS=')]
    index_lines = [l for l in output if l.startswith('INDEX=')]
    status = status_values[-1] if status_values else None
    if not status:
        self._FailWithOutput(output)
    # Test test-setup.sh / test wrapper only ensure that the directory of the
    # shard status file exist, not that the file itself does too.
    if not os.path.isdir(os.path.dirname(status)):
        self._FailWithOutput(output)
    if sorted(index_lines) != ['INDEX=0 TOTAL=2', 'INDEX=1 TOTAL=2']:
        self._FailWithOutput(output)
def _AssertUnexportsEnvvars(self, flag):
    """Checks that the test runner unexports framework-internal env vars.

    The test prints GOOD=<value> for a variable that must survive and
    BAD=<value> for one that must have been unexported.
    """
    exit_code, stdout, stderr = self.RunBazel([
        'test',
        '//foo:unexported_test.bat',
        '-t-',
        '--test_output=all',
        flag,
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    good, bad = None, None
    for line in stderr + stdout:
        if line.startswith('GOOD='):
            good = line[len('GOOD='):]
        elif line.startswith('BAD='):
            bad = line[len('BAD='):]
    # GOOD must carry a value; BAD must be empty or absent.
    if not good or bad:
        self._FailWithOutput(stderr + stdout)
def _AssertTestArgs(self, flag, expected):
    """Runs //foo:testargs_test.bat with quoted --test_arg values.

    Asserts that the argv the test actually received (one "arg=..." line per
    argument) equals `expected`.

    NOTE(review): the initial `bazel info bazel-bin` result is captured but
    never used afterwards — looks like leftover scaffolding; confirm before
    removing, since the call still asserts that `bazel info` succeeds.
    """
    exit_code, bazel_bin, stderr = self.RunBazel(['info', 'bazel-bin'])
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_bin = bazel_bin[0]
    exit_code, stdout, stderr = self.RunBazel([
        'test',
        '//foo:testargs_test.bat',
        '-t-',
        '--test_output=all',
        '--test_arg=baz',
        '--test_arg="x y"',
        '--test_arg=""',
        '--test_arg=qux',
        flag,
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    # Collect the arguments as echoed back by the test.
    actual = []
    for line in stderr + stdout:
        if line.startswith('arg='):
            actual.append(str(line[len('arg='):]))
    self.assertListEqual(expected, actual)
def _AssertUndeclaredOutputs(self, flag):
    """Checks zipping and MIME-typing of TEST_UNDECLARED_OUTPUTS_DIR contents.

    Runs //foo:undecl_test, then asserts that outputs.zip contains exactly
    the files and directories the test created (with correct sizes) and that
    the generated MANIFEST records each file's size and MIME type.
    """
    exit_code, bazel_testlogs, stderr = self.RunBazel(
        ['info', 'bazel-testlogs'])
    self.AssertExitCode(exit_code, 0, stderr)
    bazel_testlogs = bazel_testlogs[0]
    exit_code, _, stderr = self.RunBazel([
        'test',
        '//foo:undecl_test',
        '-t-',
        '--test_output=errors',
        flag,
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    # All undeclared outputs are bundled into a single zip under testlogs.
    undecl_zip = os.path.join(bazel_testlogs, 'foo', 'undecl_test',
                              'test.outputs', 'outputs.zip')
    self.assertTrue(os.path.exists(undecl_zip))
    zip_content = {}
    with zipfile.ZipFile(undecl_zip, 'r') as z:
        zip_content = {f: z.getinfo(f).file_size for f in z.namelist()}
    # Directory entries report size 0; the .ico is 70 bytes and the .dat is
    # 16 bytes, matching what _CreateMockWorkspace wrote.
    self.assertDictEqual(
        zip_content, {
            'out1/': 0,
            'out2/': 0,
            'empty/': 0,
            'empty/sub/': 0,
            'out1/data1.ico': 70,
            'out2/data2.dat': 16
        })
    undecl_mf = os.path.join(bazel_testlogs, 'foo', 'undecl_test',
                             'test.outputs_manifest', 'MANIFEST')
    self.assertTrue(os.path.exists(undecl_mf))
    mf_content = []
    with open(undecl_mf, 'rt') as f:
        mf_content = [line.strip() for line in f.readlines()]
    # Using an ".ico" file as example, because as of 2018-11-09 Bazel's CI
    # machines run Windows Server 2016 core which recognizes fewer MIME types
    # than desktop Windows versions, and one of the recognized types is ".ico"
    # files.
    self.assertListEqual(mf_content, [
        'out1/data1.ico\t70\timage/x-icon',
        'out2/data2.dat\t16\tapplication/octet-stream'
    ])
def _AssertUndeclaredOutputsAnnotations(self, flag):
    """Asserts that top-level *.part files are concatenated into ANNOTATIONS.

    Only "a.part" and "c.part" should be merged; plain files, directories and
    files inside directories (even ones named "*.part") are ignored.
    """
    exit_code, bazel_testlogs, stderr = self.RunBazel(
        ['info', 'bazel-testlogs'])
    self.AssertExitCode(exit_code, 0, stderr)
    testlogs_root = bazel_testlogs[0]
    exit_code, _, stderr = self.RunBazel([
        'test',
        '//foo:annot_test',
        '-t-',
        '--test_output=errors',
        flag,
    ])
    self.AssertExitCode(exit_code, 0, stderr)
    annotations_path = os.path.join(testlogs_root, 'foo', 'annot_test',
                                    'test.outputs_manifest', 'ANNOTATIONS')
    self.assertTrue(os.path.exists(annotations_path))
    with open(annotations_path, 'rt') as f:
        annot_content = [line.strip() for line in f.readlines()]
    self.assertListEqual(annot_content, ['Hello aHello c'])
def testTestExecutionWithTestSetupSh(self):
    """Exercises all test-execution features via the legacy Bash test-setup.sh.

    The expected argv below documents test-setup.sh's (buggy) argument
    mangling; see the inline note.
    """
    self._CreateMockWorkspace()
    flag = '--noincompatible_windows_native_test_wrapper'
    self._AssertPassingTest(flag)
    self._AssertFailingTest(flag)
    self._AssertPrintingTest(flag)
    self._AssertRunfiles(flag)
    self._AssertShardedTest(flag)
    self._AssertUnexportsEnvvars(flag)
    self._AssertTestArgs(
        flag,
        [
            '(testargs_test.bat)',
            '(foo)',
            '(a)',
            '(b)',
            '(bar)',
            # Note: debugging shows that test-setup.sh receives more-or-less
            # good arguments (let's ignore issues #6276 and #6277 for now), but
            # mangles the last few.
            # I (laszlocsomor@) don't know the reason (as of 2018-10-01) but
            # since I'm planning to phase out test-setup.sh on Windows in favor
            # of the native test wrapper, I don't intend to debug this further.
            # The test is here merely to guard against unwanted future change of
            # behavior.
            '(baz)',
            '("\\"x)',
            '(y\\"")',
            '("\\\\\\")',
            '(qux")'
        ])
    self._AssertUndeclaredOutputs(flag)
    self._AssertUndeclaredOutputsAnnotations(flag)
def testTestExecutionWithTestWrapperExe(self):
    """Exercises all test-execution features via the native Windows test wrapper."""
    self._CreateMockWorkspace()
    flag = '--incompatible_windows_native_test_wrapper'
    self._AssertPassingTest(flag)
    self._AssertFailingTest(flag)
    self._AssertPrintingTest(flag)
    self._AssertRunfiles(flag)
    self._AssertShardedTest(flag)
    self._AssertUnexportsEnvvars(flag)
    self._AssertTestArgs(
        flag,
        [
            '(testargs_test.bat)',
            '(foo)',
            # TODO(laszlocsomor): assert that "a b" is passed as one argument,
            # not two, after https://github.com/bazelbuild/bazel/issues/6277
            # is fixed.
            '(a)',
            '(b)',
            # TODO(laszlocsomor): assert that the empty string argument is
            # passed, after https://github.com/bazelbuild/bazel/issues/6276
            # is fixed.
            '(bar)',
            '(baz)',
            '("x y")',
            '("")',
            '(qux)',
            '()'
        ])
    self._AssertUndeclaredOutputs(flag)
    self._AssertUndeclaredOutputsAnnotations(flag)
# Standard unittest entry point: run every test case defined in this module.
if __name__ == '__main__':
    unittest.main()
| 34.972816
| 80
| 0.574149
|
ef1f2e30baf75627f1814f8e6ce17027a9bb0977
| 187,767
|
py
|
Python
|
Mini-Project-III/Script/Neural Networks Mini Project-II.py
|
cankocagil/Neural-Networks
|
5cb51ccb7dcc8afd30c5745111a87498ec38e006
|
[
"MIT"
] | null | null | null |
Mini-Project-III/Script/Neural Networks Mini Project-II.py
|
cankocagil/Neural-Networks
|
5cb51ccb7dcc8afd30c5745111a87498ec38e006
|
[
"MIT"
] | null | null | null |
Mini-Project-III/Script/Neural Networks Mini Project-II.py
|
cankocagil/Neural-Networks
|
5cb51ccb7dcc8afd30c5745111a87498ec38e006
|
[
"MIT"
] | null | null | null |
# Necessary imports :
import numpy as np
import matplotlib.pyplot as plt
import h5py
# %%
# Necessary imports :
#import numpy as np
#import matplotlib.pyplot as plt
#import h5py
import math
import pandas as pd
import seaborn as sns
import sys
question = input('Please enter question number 1/3 :')
def can_kocagil_21602218_hw3(question):
if question == '1' :
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# %%
def get_data(path) -> dict :
    """Load the assignment's HDF5 bundle.

    Returns a dict with the image patches ('data') and the whitening
    transforms ('invXForm', 'xForm') read from the first three datasets in
    the file.

    NOTE(review): this relies on the ordering of `F.keys()` — names[0..2]
    are assumed to be 'data', 'invXForm', 'xForm' respectively. Confirm the
    file's key order; indexing by explicit key name would be safer.
    """
    with h5py.File(path,'r') as F:
        # Names variable contains the names of training and testing file
        names = list(F.keys())
        data = np.array(F[names[0]][()])
        invXForm = np.array(F[names[1]][()])
        xForm = np.array(F[names[2]][()])
    return {'data' : data,
            'invXForm': invXForm,
            'xForm' : xForm}
# Load the HDF5 dataset bundle from disk.
path = 'assign3_data1.h5'
data_h5 = get_data(path)
# %%
data = data_h5['data']
invXForm = data_h5['invXForm']
xForm = data_h5['xForm']
# %%
print(f'The data has a shape: {data.shape}')
# %%
# Move the channel axis to the end: (N, C, H, W) -> (N, W, H, C).
# NOTE(review): swapaxes(1, 3) also transposes H and W — harmless only if
# the patches are square (they appear to be 16x16); confirm.
data = np.swapaxes(data,1,3)
# %%
print(f'The data has a shape: {data.shape}')
class ImagePreprocessing:
    """Simple image preprocessing steps for batches shaped (N, H, W[, C]).

    Methods:
        ToGray(data)                -- RGB -> gray via the luminosity model.
        MeanRemoval(data)           -- subtract each image's own mean (in place).
        ClipStd(data, std_scaler)   -- clip to +/- std_scaler * std(data).
        Normalize(data, lo, hi)     -- linearly rescale into [lo, hi].
        Flatten(data)               -- (N, ...) -> (N, prod(...)).
    """

    def __init__(self):
        pass

    def ToGray(self, data):
        """Convert an RGB batch (N, H, W, 3) to gray scale (luminosity weights)."""
        r_w, g_w, b_w = 0.2126, 0.7152, 0.0722
        return (data[:, :, :, 0] * r_w
                + data[:, :, :, 1] * g_w
                + data[:, :, :, 2] * b_w)

    def MeanRemoval(self, data):
        """Subtract each image's own mean pixel intensity.

        Mutates `data` in place (matching the original behavior) and also
        returns it.
        """
        mean_pixel = np.mean(data, axis=(1, 2))
        for i in range(data.shape[0]):
            data[i] -= mean_pixel[i]
        return data

    def ClipStd(self, data, std_scaler):
        """Clip pixel values to +/- std_scaler standard deviations.

        The standard deviation is computed over the whole batch.
        """
        bound = std_scaler * np.std(data)
        return np.clip(data, -bound, bound)

    def Normalize(self, data, min_scale, max_scale):
        """Linearly rescale `data` into [min_scale, max_scale].

        Bug fix: the original assigned data.max() to its "min" variable and
        data.min() to its "max" variable, which inverted the intensities
        (smallest pixel mapped to max_scale). The mapping below is the
        intended one: data.min() -> min_scale, data.max() -> max_scale.
        """
        data_min = data.min()
        data_max = data.max()
        unit = (data - data_min) / (data_max - data_min)
        return unit * (max_scale - min_scale) + min_scale

    def Flatten(self, data):
        """Flatten each image to a 1-D vector: (N, ...) -> (N, prod(...))."""
        return data.reshape(data.shape[0], -1)
# %%
# Preprocessing pipeline: RGB -> gray -> per-image mean removal ->
# +/-3-sigma clipping -> rescale into [0.1, 0.9].
# Defining preprocessor :
preprocessor = ImagePreprocessing()
# %%
# Converting gray scale :
gray_data = preprocessor.ToGray(data = data)
# %%
# Mean removing :
mean_removed_data = preprocessor.MeanRemoval(data = gray_data)
# %%
# Standard deviation clipping :
clipped_data = preprocessor.ClipStd(data = mean_removed_data,std_scaler = 3)
# %%
# Normalized data
data_processed = preprocessor.Normalize(data = clipped_data, min_scale = 0.1, max_scale = 0.9)
# %%
print(f' Maximum val of data : {data_processed.max()}')
print(f' Minimum val of data : {data_processed.min()}')
# %%
def plot_patches(data, num_patches, cmap = 'viridis'):
    """Plots `num_patches` randomly chosen patches on a 20x20 subplot grid."""
    np.random.seed(15)
    picks = np.random.randint(data.shape[0], size = num_patches)
    plt.figure(figsize = (18,16))
    for slot, sample_idx in enumerate(picks):
        plt.subplot(20, 20, slot + 1)
        plt.imshow(data[sample_idx], cmap = cmap)
        plt.axis('off')
    plt.show()
# %%
plot_patches(preprocessor.Normalize(data = data, min_scale = 0, max_scale = 1),num_patches = 200)
# %%
#plot_patches(data,num_patches = 200)
# %%
plot_patches(data_processed,num_patches = 200, cmap = 'gray')
# %%
class Autoencoder:
    """Single-hidden-layer sparse autoencoder trained by full-batch gradient descent.

    Cost = half mean squared reconstruction error
         + (lambda/2) * (||W1||^2 + ||W2||^2)      (Tykhonov / L2 penalty)
         + beta * KL(rho || mean hidden activation) (sparsity penalty)

    The public interface matches the original implementation: __init__,
    InitParams, w_o, sigmoid, forward, total_loss, MSE, aeCost,
    KL_divergence, TykhonowRegularization, backward, fit, step, evaluate,
    display_weights, display_outputs, parameters, history.
    """

    def __init__(self, input_size, hidden_size, lambd):
        """Builds the network.

        Args:
            input_size: dimensionality of a flattened input patch.
            hidden_size: number of hidden (encoding) units.
            lambd: Tykhonov regularization strength.
        """
        np.random.seed(1500)
        self.lambd = lambd
        self.beta = 1e-1           # weight of the KL sparsity penalty
        self.rho = 5e-2            # target mean hidden activation
        self.learning_rate = 9e-1  # initial SGD step size; decayed in step()
        self.params = {'L_in' : input_size,
                       'L_hidden' : hidden_size,
                       'Lambda' : self.lambd,
                       'Beta' : self.beta,
                       'Rho' : self.rho}
        self.W_e = self.InitParams(input_size, hidden_size)
        self.loss = []  # per-epoch training loss, filled by fit()

    def InitParams(self, input_size, hidden_size):
        """Draws weights/biases from Uniform[-w_o, w_o], w_o = sqrt(6/(L_pre+L_post))."""
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = input_size
        W1_high = self.w_o(input_size, hidden_size)
        W1_low = -W1_high
        self.W1 = np.random.uniform(W1_low, W1_high, size=(input_size, hidden_size))
        self.B1 = np.random.uniform(W1_low, W1_high, size=(1, hidden_size))
        W2_high = self.w_o(hidden_size, self.output_size)
        W2_low = -W2_high
        self.W2 = np.random.uniform(W2_low, W2_high, size=(hidden_size, self.output_size))
        # NOTE(review): B2 reuses the encoder bound (W1_low/W1_high), exactly
        # as in the original code; the bounds coincide anyway because
        # w_o(a, b) == w_o(b, a).
        self.B2 = np.random.uniform(W1_low, W1_high, size=(1, self.output_size))
        return {'W1' : self.W1,
                'W2' : self.W2,
                'B1' : self.B1,
                'B2' : self.B2}

    def w_o(self, L_pre, L_post):
        """Xavier-style uniform initialization bound."""
        return np.sqrt(6/(L_pre + L_post))

    def sigmoid(self, X, grad = True):
        """Sigmoid activation; grad=True returns the derivative sig*(1-sig)."""
        sig = 1/(1 + np.exp(-X))
        return sig * (1-sig) if grad else sig

    def forward(self, X):
        """Forward pass; returns pre-activations and activations of both layers."""
        W1, W2 = self.W_e['W1'], self.W_e['W2']
        B1, B2 = self.W_e['B1'], self.W_e['B2']
        Z1 = np.dot(X, W1) + B1
        A1 = self.sigmoid(Z1, grad = False)   # hidden code
        Z2 = np.dot(A1, W2) + B2
        A2 = self.sigmoid(Z2, grad = False)   # reconstruction
        return {"Z1": Z1, "A1": A1,
                "Z2": Z2, "A2": A2}

    def total_loss(self, outs, label):
        """Reconstruction MSE + Tykhonov penalty + KL sparsity penalty."""
        W1, W2 = self.W_e['W1'], self.W_e['W2']
        Lambda = self.params['Lambda']
        beta = self.params['Beta']
        rho = self.params['Rho']
        J_mse = self.MSE(outs['A2'], label, grad = False)
        J_tykhonow = self.TykhonowRegularization(W1 = W1, W2 = W2, lambd = Lambda, grad = False)
        J_KL = self.KL_divergence(rho = rho, expected = np.mean(outs['A1']), beta = beta, grad = False)
        return J_mse + J_tykhonow + J_KL

    def MSE(self, pred, label, grad = True):
        """Half mean squared error (grad=False) or 0.5*(pred-label) (grad=True).

        NOTE(review): the analytic derivative of 0.5*sum((p-l)^2)/m is
        (p-l)/m; the original returns 0.5*(p-l) and applies 1/m later in
        backward, so the extra 0.5 acts as a constant learning-rate scale.
        Preserved as-is.
        """
        return 1/2 * (pred - label) if grad else 1/2 * np.sum((pred - label)**2)/pred.shape[0]

    def aeCost(self, data):
        """One full pass: forward, total loss, and all gradients.

        The reconstruction target is the input itself (autoencoder).
        """
        outs = self.forward(data)
        loss = self.total_loss(outs, data)
        grads = self.backward(outs, data)
        return {'J' : loss,
                'J_grad' : grads}

    def KL_divergence(self, rho, beta, expected, grad = True):
        """KL sparsity penalty (grad=False) or its derivative wrt `expected` (grad=True).

        FIX: the original tiled the scalar gradient into a hard-coded
        (10240, 1) column vector, which only broadcast correctly when the
        batch held exactly 10240 samples (and crashed otherwise). Returning
        the scalar is numerically identical in that case — numpy broadcasting
        adds it to every element of dZ1 — and works for any batch size.
        """
        if grad:
            return beta * (-(rho/expected) + (1-rho)/(1-expected))
        return beta * (np.sum((rho * np.log(rho/expected))
                              + ((1-rho) * np.log((1-rho)/(1-expected)))))

    def TykhonowRegularization(self, W1, W2, lambd, grad = True):
        """L2 weight penalty (grad=False) or its per-matrix gradients (grad=True)."""
        if grad:
            return {'dW1': lambd * W1, 'dW2': lambd * W2}
        return (lambd/2) * (np.sum(W1**2) + np.sum(W2**2))

    def backward(self, outs, data):
        """Backpropagation; returns gradients of the total cost wrt all parameters."""
        m = data.shape[0]
        Lambda = self.params['Lambda']
        beta = self.params['Beta']
        rho = self.params['Rho']
        W1, W2 = self.W_e['W1'], self.W_e['W2']
        Z1, A1 = outs['Z1'], outs['A1']
        Z2, A2 = outs['Z2'], outs['A2']
        L2_grad = self.TykhonowRegularization(W1, W2, lambd = Lambda, grad = True)
        # Scalar sparsity-penalty gradient; broadcast over dZ1 below.
        KL_grad_W1 = self.KL_divergence(rho, beta, expected = np.mean(A1), grad = True)
        dZ2 = self.MSE(A2, data, grad = True) * self.sigmoid(Z2, grad = True)
        dW2 = (1/m) * (np.dot(A1.T, dZ2) + L2_grad['dW2'])
        dB2 = (1/m) * (np.sum(dZ2, axis=0, keepdims=True))
        dZ1 = (np.dot(dZ2, W2.T) + KL_grad_W1) * self.sigmoid(Z1, grad = True)
        dW1 = (1/m) * (np.dot(data.T, dZ1) + L2_grad['dW1'])
        dB1 = (1/m) * (np.sum(dZ1, axis=0, keepdims=True))
        assert (dW1.shape == W1.shape and dW2.shape == W2.shape)
        return {"dW1": dW1, "dW2": dW2,
                "dB1": dB1, "dB2": dB2}

    def fit(self, data, epochs = 5000, verbose = True):
        """Trains with full-batch gradient descent, recording the loss per epoch."""
        for epoch in range(epochs):
            loss_and_grads = self.aeCost(data)
            self.step(grads = loss_and_grads['J_grad'])
            if verbose:
                print(f"[{epoch}/{epochs}] ----------> Loss :{loss_and_grads['J']}")
            self.loss.append(loss_and_grads['J'])

    def step(self, grads):
        """One gradient-descent update; the learning rate decays geometrically."""
        self.W_e['W1'] += -self.learning_rate * grads['dW1']
        self.W_e['W2'] += -self.learning_rate * grads['dW2']
        self.W_e['B1'] += -self.learning_rate * grads['dB1']
        self.W_e['B2'] += -self.learning_rate * grads['dB2']
        self.learning_rate *= 0.9999

    def evaluate(self):
        """Plots the recorded training-loss curve."""
        plt.plot(self.loss, color = 'orange')
        plt.xlabel(' # of Epochs')
        plt.ylabel('Loss')
        plt.title('Training Loss versus Epochs')
        plt.legend([f'Loss : {self.loss[-1]}'])

    def display_weights(self):
        """Shows each hidden unit's incoming weights as a 16x16 gray image (8x8 grid)."""
        W1 = self.W_e['W1']
        num_disp = W1.shape[1]
        fig = plt.figure(figsize = (9,8))
        for i in range(num_disp):
            plt.subplot(8, 8, i+1)
            plt.imshow(W1.T[i].reshape(16,16), cmap = 'gray')
            plt.axis('off')
        fig.suptitle('Hidden Layer Feature Representation')
        plt.show()

    def display_outputs(self, output, data, num = 4):
        """Shows `num` random reconstructions above their originals.

        Please pass only square-friendly values (1, 4, ...), as in the
        original implementation.
        """
        random_indexes = np.random.randint(output.shape[0], size = num)
        plt.figure(figsize=(12, 4))
        for i in range(len(random_indexes)):
            ax = plt.subplot(2, 5, i+1)
            plt.imshow(output[random_indexes[i]].reshape(16,16), cmap = 'gray')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
            plt.title("Reconstructed Image")
            ax = plt.subplot(2, 5, i + 1 + 5)
            plt.imshow(data[random_indexes[i]].reshape(16,16), cmap = 'gray')
            plt.title("Original Image")
            plt.gray()
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
        plt.show()

    def parameters(self):
        """Returns the dict of configurable parameters (weights and biases)."""
        return self.W_e

    def history(self):
        """Returns the training history."""
        return {'Loss' : self.loss}
# %%
class Solver:
    """Thin training driver.

    Encapsulates a model together with its training data and delegates the
    full-batch gradient-descent optimization to the model's own fit().
    """

    def __init__(self, model, data):
        self.model = model
        self.data = data

    def train(self, epochs = 5000, verbose = False):
        """Fits the wrapped model on the stored data."""
        self.model.fit(self.data, epochs, verbose)

    def parameters(self):
        """Returns the wrapped model's configurable parameters."""
        return self.model.parameters()
# %%
# Flatten the preprocessed patches into the (N, 256) training matrix.
data_feed = preprocessor.Flatten(data_processed)
input_size = data_feed.shape[1]
hidden_size = 64
# Baseline model: 64 hidden units with a small Tykhonov penalty.
autoencoder = Autoencoder(input_size = input_size, hidden_size = hidden_size,lambd = 5e-4)
# %%
solver = Solver(model = autoencoder, data = data_feed)
solver.train(verbose = True)
# %%
net_params = solver.parameters()
net_history = autoencoder.history()
# %%
# Plot the training-loss curve.
autoencoder.evaluate()
# %%
# Visualize the learned first-layer features.
autoencoder.display_weights()
# %%
# Reconstruct the training patches and compare against the originals.
preds = autoencoder.forward(data_feed)
autoencoder.display_outputs(preds['A2'],data_feed)
# %%
hidden_size_1 = 10
lambd_1 = 0
autoencoder_1 = Autoencoder(input_size = input_size, hidden_size = hidden_size_1, lambd = lambd_1)
solver_1 = Solver(model = autoencoder_1, data = data_feed)
solver_1.train()
autoencoder_1.evaluate()
autoencoder_1.display_weights()
preds_1 = autoencoder_1.forward(data_feed)
autoencoder_1.display_outputs(preds_1['A2'],data_feed)
# %%
hidden_size_2 = 10
lambd_2 = 1e-3
autoencoder_2 = Autoencoder(input_size = input_size, hidden_size = hidden_size_2, lambd = lambd_2)
solver_2 = Solver(model = autoencoder_2, data = data_feed)
solver_2.train()
autoencoder_2.evaluate()
autoencoder_2.display_weights()
preds_2 = autoencoder_2.forward(data_feed)
autoencoder_2.display_outputs(preds_2['A2'],data_feed)
# %%
hidden_size_3 = 10
lambd_3 = 1e-5
autoencoder_3 = Autoencoder(input_size = input_size, hidden_size = hidden_size_3, lambd = lambd_3)
solver_3 = Solver(model = autoencoder_3, data = data_feed)
solver_3.train()
autoencoder_3.evaluate()
autoencoder_3.display_weights()
preds_3 = autoencoder_3.forward(data_feed)
autoencoder_3.display_outputs(preds_3['A2'],data_feed)
# %%
hidden_size_4 = 50
lambd_4 = 0
autoencoder_4 = Autoencoder(input_size = input_size, hidden_size = hidden_size_4, lambd = lambd_4)
solver_4 = Solver(model = autoencoder_4, data = data_feed)
solver_4.train()
autoencoder_4.evaluate()
autoencoder_4.display_weights()
preds_4 = autoencoder_4.forward(data_feed)
autoencoder_4.display_outputs(preds_4['A2'],data_feed)
# %%
hidden_size_5 = 50
lambd_5 = 1e-3
autoencoder_5 = Autoencoder(input_size = input_size, hidden_size = hidden_size_5, lambd = lambd_5)
solver_5 = Solver(model = autoencoder_5, data = data_feed)
solver_5.train()
autoencoder_5.evaluate()
autoencoder_5.display_weights()
preds_5 = autoencoder_5.forward(data_feed)
autoencoder_5.display_outputs(preds_5['A2'],data_feed)
# %%
autoencoder_5.display_outputs(preds_5['A2'],data_feed)
# %%
hidden_size_6 = 50
lambd_6 = 1e-5
autoencoder_6 = Autoencoder(input_size = input_size, hidden_size = hidden_size_6, lambd = lambd_6)
solver_6 = Solver(model = autoencoder_6, data = data_feed)
solver_6.train()
autoencoder_6.evaluate()
autoencoder_6.display_weights()
preds_6 = autoencoder_6.forward(data_feed)
# %%
autoencoder_6.display_outputs(preds_6['A2'],data_feed)
# %%
hidden_size_7 = 100
lambd_7 = 0
autoencoder_7 = Autoencoder(input_size = input_size, hidden_size = hidden_size_7, lambd = lambd_7)
solver_7 = Solver(model = autoencoder_7, data = data_feed)
solver_7.train()
autoencoder_7.evaluate()
#autoencoder_7.display_weights()
preds_7 = autoencoder_7.forward(data_feed)
autoencoder_7.display_outputs(preds_7['A2'],data_feed)
# %%
#autoencoder_7.display_weights()
W1 = autoencoder_7.W_e['W1']
num_disp = W1.shape[1]
fig = plt.figure(figsize = (9,8))
for i in range(num_disp):
plt.subplot(10,10,i+1)
plt.imshow(W1.T[i].reshape(16,16),cmap = 'gray')
plt.axis('off')
fig.suptitle('Hidden Layer Feature Representation')
plt.show()
preds_7 = autoencoder_7.forward(data_feed)
autoencoder_7.display_outputs(preds_7['A2'],data_feed)
# %%
autoencoder_7.display_outputs(preds_7['A2'],data_feed)
# %%
hidden_size_8 = 100
lambd_8 = 1e-3
autoencoder_8 = Autoencoder(input_size = input_size, hidden_size = hidden_size_8, lambd = lambd_8)
solver_8 = Solver(model = autoencoder_8, data = data_feed)
solver_8.train()
autoencoder_8.evaluate()
W1 = autoencoder_8.W_e['W1']
num_disp = W1.shape[1]
fig = plt.figure(figsize = (9,8))
for i in range(num_disp):
plt.subplot(10,10,i+1)
plt.imshow(W1.T[i].reshape(16,16),cmap = 'gray')
plt.axis('off')
fig.suptitle('Hidden Layer Feature Representation')
plt.show()
preds_8 = autoencoder_8.forward(data_feed)
autoencoder_8.display_outputs(preds_8['A2'],data_feed)
# %%
hidden_size_9 = 100
lambd_9 = 1e-5
autoencoder_9 = Autoencoder(input_size = input_size, hidden_size = hidden_size_9, lambd = lambd_9)
solver_9 = Solver(model = autoencoder_9, data = data_feed)
solver_9.train()
autoencoder_9.evaluate()
W1 = autoencoder_9.W_e['W1']
num_disp = W1.shape[1]
fig = plt.figure(figsize = (9,8))
for i in range(num_disp):
plt.subplot(10,10,i+1)
plt.imshow(W1.T[i].reshape(16,16),cmap = 'gray')
plt.axis('off')
fig.suptitle('Hidden Layer Feature Representation')
plt.show()
preds_9 = autoencoder_9.forward(data_feed)
# %%
autoencoder_9.display_outputs(preds_9['A2'],data_feed)
# %%
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow import keras
import tensorflow.keras.backend as K
# %%
if tf.test.gpu_device_name():
print('Default GPU Device:{}'.format(tf.test.gpu_device_name()))
else:
print("NO GPU, that's okey")
# %%
def MeanSquaredError():
    """Returns a Keras-compatible loss: 0.5 * sum((y_true - y_pred)^2) / batch.

    Fix (naming only): Keras calls loss functions as loss(y_true, y_pred);
    the original named its parameters (pred, label) in the reverse role.
    That was harmless here — the expression is symmetric in its arguments
    and both have the same batch dimension — but the names below reflect
    the actual calling convention.
    """
    def customMeanSquaredError(y_true, y_pred):
        return 1/2 * K.sum((y_true - y_pred)**2) / y_true.shape[0]
    return customMeanSquaredError
def KL_divergence(rho, beta):
    """Returns an activity regularizer: beta * sum KL(rho || mean activation).

    The mean activation is taken per hidden unit over the batch (axis=0).
    """
    def customKL(out):
        mean_act = K.mean(out, axis=0)
        term_a = rho * K.log(rho / mean_act)
        term_b = (1 - rho) * K.log((1 - rho) / (1 - mean_act))
        return beta * K.sum(term_a + term_b)
    return customKL
# %%
def create_model(hidden_size, lambd):
    """Builds and compiles a Keras autoencoder mirroring the numpy model.

    Bug fix: the encoder layer used the global ``encoding_dim`` instead of
    the ``hidden_size`` argument, so its width disagreed with the weight
    initializers (sized for ``hidden_size``) whenever the two differed.

    NOTE(review): the decoder applies the L2 penalty as an
    ``activity_regularizer`` while the encoder uses ``kernel_regularizer``;
    this asymmetry is preserved from the original — confirm it is intended.
    Relies on module-level globals: inp_dim, rho, beta.
    """
    tf_weights = tf_weight_initializer(inp_dim = inp_dim, hidden_dim = hidden_size)
    input_img = keras.Input(shape=(inp_dim,))
    encoded = layers.Dense(hidden_size, activation='sigmoid',
                           kernel_regularizer=tf.keras.regularizers.l2(lambd),
                           activity_regularizer=KL_divergence(rho, beta),
                           kernel_initializer = tf_weights['W1'],
                           bias_initializer = tf_weights['B1'])(input_img)
    decoded = layers.Dense(inp_dim, activation='sigmoid',
                           activity_regularizer=tf.keras.regularizers.l2(lambd),
                           kernel_initializer = tf_weights['W2'],
                           bias_initializer = tf_weights['B2'])(encoded)
    tf_autoencoder = keras.Model(input_img, decoded)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.9, momentum=0, nesterov=False)
    tf_autoencoder.compile(optimizer=optimizer, loss=MeanSquaredError())
    return tf_autoencoder
def plot_tf_weights(W1):
    """Displays each hidden unit's incoming weights as a 16x16 gray image.

    Uses a 10x10 subplot grid, so at most 100 units are shown sensibly.
    """
    fig = plt.figure(figsize = (9,8))
    for idx, column in enumerate(W1.T):
        plt.subplot(10, 10, idx + 1)
        plt.imshow(column.reshape(16,16), cmap = 'gray')
        plt.axis('off')
    fig.suptitle('Hidden Layer Feature Representation')
    plt.show()
# %%
tf_model_1 = create_model(hidden_size = 10,lambd = 0)
tf_model_1.fit(data_feed, data_feed,
epochs=5000,
batch_size=data_feed.shape[0])
tf_history_1 = tf_model_1.history.history
plt.plot(tf_history_1['loss'])
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
#plt.legend([f'Loss : {tf_history_1['loss'][-1]}'])
tf_preds_1 = tf_model_1.predict(data_feed)
autoencoder.display_outputs(tf_preds_1,data_feed)
tf_weights_1 = tf_model_1.get_weights()
plot_tf_weights(tf_weights_1[0])
# %%
tf_model_2 = create_model(hidden_size = 10,lambd = 1e-3)
tf_model_2.fit(data_feed, data_feed,
epochs=5000,
batch_size=data_feed.shape[0])
tf_history_2 = tf_model_2.history.history
plt.plot(tf_history_2['loss'])
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
tf_preds_2 = tf_model_2.predict(data_feed)
autoencoder.display_outputs(tf_preds_2,data_feed)
tf_weights_2 = tf_model_2.get_weights()
plot_tf_weights(tf_weights_2[0])
# %%
tf_model_3 = create_model(hidden_size = 10,lambd = 1e-5)
tf_model_3.fit(data_feed, data_feed,
epochs=5000,
batch_size=data_feed.shape[0])
tf_history_3 = tf_model_3.history.history
plt.plot(tf_history_3['loss'])
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
tf_preds_3 = tf_model_3.predict(data_feed)
autoencoder.display_outputs(tf_preds_3,data_feed)
tf_weights_3 = tf_model_3.get_weights()
plot_tf_weights(tf_weights_3[0])
# %%
tf_model_4 = create_model(hidden_size = 50,lambd = 0)
tf_model_4.fit(data_feed, data_feed,
epochs=5000,
batch_size=data_feed.shape[0])
tf_history_4 = tf_model_4.history.history
plt.plot(tf_history_4['loss'])
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
tf_preds_4 = tf_model_4.predict(data_feed)
autoencoder.display_outputs(tf_preds_4,data_feed)
tf_weights_4 = tf_model_4.get_weights()
plot_tf_weights(tf_weights_4[0])
# %%
tf_model_5 = create_model(hidden_size = 50,lambd = 1e-3)
tf_model_5.fit(data_feed, data_feed,
epochs=5000,
batch_size=data_feed.shape[0])
tf_history_5 = tf_model_5.history.history
plt.plot(tf_history_5['loss'])
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
tf_preds_5 = tf_model_5.predict(data_feed)
autoencoder.display_outputs(tf_preds_5,data_feed)
tf_weights_5 = tf_model_5.get_weights()
plot_tf_weights(tf_weights_5[0])
# %%
tf_model_6 = create_model(hidden_size = 50,lambd = 1e-5)
tf_model_6.fit(data_feed, data_feed,
epochs=5000,
batch_size=data_feed.shape[0])
tf_history_6 = tf_model_6.history.history
plt.plot(tf_history_6['loss'])
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
tf_preds_6 = tf_model_6.predict(data_feed)
autoencoder.display_outputs(tf_preds_6,data_feed)
tf_weights_6 = tf_model_1.get_weights()
plot_tf_weights(tf_weights_6[0])
# %%
tf_model_7 = create_model(hidden_size = 100,lambd = 0)
tf_model_7.fit(data_feed, data_feed,
epochs=5000,
batch_size=data_feed.shape[0])
tf_history_7 = tf_model_7.history.history
plt.plot(tf_history_7['loss'])
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
tf_preds_7 = tf_model_7.predict(data_feed)
autoencoder.display_outputs(tf_preds_7,data_feed)
tf_weights_7 = tf_model_7.get_weights()
plot_tf_weights(tf_weights_7[0])
# %%
tf_model_8 = create_model(hidden_size = 100,lambd = 1e-3)
tf_model_8.fit(data_feed, data_feed,
epochs=5000,
batch_size=data_feed.shape[0])
tf_history_8 = tf_model_8.history.history
plt.plot(tf_history_8['loss'])
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
tf_preds_8 = tf_model_8.predict(data_feed)
autoencoder.display_outputs(tf_preds_8,data_feed)
tf_weights_8 = tf_model_8.get_weights()
plot_tf_weights(tf_weights_8[0])
# %%
tf_model_9 = create_model(hidden_size = 100,lambd = 1e-5)
tf_model_9.fit(data_feed, data_feed,
epochs=5000,
batch_size=data_feed.shape[0])
tf_history_9 = tf_model_9.history.history
plt.plot(tf_history_9['loss'])
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
tf_preds_9 = tf_model_9.predict(data_feed)
autoencoder.display_outputs(tf_preds_9,data_feed)
tf_weights_9 = tf_model_9.get_weights()
plot_tf_weights(tf_weights_9[0])
# %%
#encoding_dim = 10
rho,beta = 5e-1,1e-1
inp_dim = 256
#lamb = 0
W_scaler = lambda L_pre,L_post : np.sqrt(6/(L_pre + L_post))
def tf_weight_initializer(inp_dim, hidden_dim):
    """Build Xavier-uniform Keras initializers for a 1-hidden-layer autoencoder.

    Parameters
    ----------
    inp_dim : int
        Input (and reconstruction) dimensionality.
    hidden_dim : int
        Hidden/encoding dimensionality.

    Returns
    -------
    dict
        Initializers for encoder weights/bias ('W1'/'B1') and decoder
        weights/bias ('W2'/'B2'), with uniform limit
        sqrt(6 / (fan_in + fan_out)) via the module-level ``W_scaler``.

    Note: the previous version also drew throwaway sample tensors from each
    initializer (one misnamed, one with a transposed shape); that dead code
    is removed -- Keras consumes the initializer objects themselves.
    """
    lim_enc = W_scaler(inp_dim, hidden_dim)   # encoder-side limit
    lim_dec = W_scaler(hidden_dim, inp_dim)   # decoder-side limit
    return {
        'W1': tf.keras.initializers.RandomUniform(minval=-lim_enc, maxval=lim_enc),
        'W2': tf.keras.initializers.RandomUniform(minval=-lim_dec, maxval=lim_dec),
        'B1': tf.keras.initializers.RandomUniform(minval=-lim_enc, maxval=lim_enc),
        'B2': tf.keras.initializers.RandomUniform(minval=-lim_dec, maxval=lim_dec),
    }
# Custom Xavier initializers for the current architecture.
# NOTE(review): ``encoding_dim`` and ``lamb`` are commented out in the cell
# above, so they must be defined earlier in the notebook -- confirm.
tf_weights = tf_weight_initializer(inp_dim = inp_dim, hidden_dim = encoding_dim)
# %%
# Sparse autoencoder: sigmoid encoder with L2 weight decay and a KL sparsity
# penalty on activations; sigmoid decoder with an L2 activity penalty.
input_img = keras.Input(shape=(inp_dim,))
encoded = layers.Dense(encoding_dim,activation='sigmoid',
                       kernel_regularizer=tf.keras.regularizers.l2(lamb),
                       activity_regularizer=KL_divergence(rho,beta),
                       kernel_initializer = tf_weights['W1'],
                       bias_initializer = tf_weights['B1'])(input_img)
decoded = layers.Dense(inp_dim,activation='sigmoid',
                       activity_regularizer=tf.keras.regularizers.l2(lamb),
                       kernel_initializer = tf_weights['W2'],
                       bias_initializer = tf_weights['B2'])(encoded)
tf_autoencoder = keras.Model(input_img,decoded)
# %%
tf_autoencoder.summary()
# %%
# Plain SGD (no momentum), mean-squared reconstruction loss.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.9,momentum=0,nesterov=False)
tf_autoencoder.compile(optimizer=optimizer,loss=MeanSquaredError())
# %%
# Full-batch training (batch size == dataset size).
tf_autoencoder.fit(data_feed, data_feed,
                   epochs=5000,
                   batch_size=data_feed.shape[0])
# %%
tf_history = tf_autoencoder.history.history
plt.plot(tf_history['loss'])
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
# %%
tf_preds = tf_autoencoder.predict(data_feed)
# %%
autoencoder.display_outputs(tf_preds,data_feed)
# %%
tf_weights = tf_autoencoder.get_weights()
# %%
# NOTE(review): duplicate of the previous line -- harmless but redundant.
tf_weights = tf_autoencoder.get_weights()
# Plot each hidden unit's input weights as a 16x16 feature image.
W1 = tf_weights[0]
num_disp = W1.shape[1]
fig = plt.figure(figsize = (9,8))
for i in range(num_disp):
    plt.subplot(8,8,i+1)
    plt.imshow(W1.T[i].reshape(16,16),cmap = 'gray')
    plt.axis('off')
fig.suptitle('Hidden Layer Feature Representation')
plt.show()
# %%
# %%
# Adam-optimized variant: same architecture, default Keras initializers,
# stronger L2 (5e-4) on encoder weights and decoder activations.
input_img_optim = keras.Input(shape=(inp_dim,))
encoded_optim = layers.Dense(encoding_dim, activation='sigmoid',
                             kernel_regularizer=tf.keras.regularizers.l2(5e-4),
                             activity_regularizer=KL_divergence(rho, beta))(input_img_optim)
decoded_optim = layers.Dense(inp_dim, activation='sigmoid',
                             activity_regularizer=tf.keras.regularizers.l2(5e-4))(encoded_optim)
tf_autoencoder_optim = keras.Model(input_img_optim, decoded_optim)
# BUGFIX: this cell previously compiled/trained/evaluated the *old*
# ``tf_autoencoder`` -- ``tf_autoencoder_optim`` was built but never used.
tf_autoencoder_optim.compile(optimizer='adam', loss=MeanSquaredError())
tf_autoencoder_optim.summary()
# %%
tf_autoencoder_optim.fit(data_feed, data_feed,
                         epochs=5000,
                         batch_size=data_feed.shape[0])
# %%
tf_history_optim = tf_autoencoder_optim.history.history
plt.plot(tf_history_optim['loss'], color='green')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss versus Epoch')
# %%
tf_preds_optim = tf_autoencoder_optim.predict(data_feed)
autoencoder.display_outputs(tf_preds_optim, data_feed)
# %%
tf_weights_optim = tf_autoencoder_optim.get_weights()
# BUGFIX: plot this model's first-layer weights (previously read the stale
# ``tf_weights`` left over from the SGD model).
W1 = tf_weights_optim[0]
num_disp = W1.shape[1]
fig = plt.figure(figsize=(9, 8))
for i in range(num_disp):
    plt.subplot(8, 8, i + 1)
    plt.imshow(W1.T[i].reshape(16, 16), cmap='gray')
    plt.axis('off')
fig.suptitle('Hidden Layer Feature Representation')
plt.show()
elif question == '3' :
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
def sigmoid(x):
    """Numerically-safe logistic function 1 / (1 + e^-x)."""
    safe = np.clip(x, -700, 700)  # keep exp() within float64 range
    return 1.0 / (1.0 + np.exp(-safe))
def dsigmoid(y):
    """Sigmoid derivative expressed in terms of the sigmoid output y."""
    return (1 - y) * y
def tanh(x):
    """Hyperbolic tangent (thin wrapper over numpy's implementation)."""
    return np.tanh(x)
def dtanh(y):
    """tanh derivative expressed in terms of the tanh output y: 1 - y^2."""
    return 1 - y ** 2
# %%
# Load the human-activity dataset; key order determines the train/test split.
# NOTE(review): labels are used with np.argmax(..., 1) below, so y arrays are
# assumed to be one-hot / per-class scores -- confirm against the HDF5 file.
with h5py.File('assign3_data3.h5','r') as F:
    # Names variable contains the names of training and testing file
    names = list(F.keys())
    X_train = np.array(F[names[0]][()])  # training sequences
    y_train = np.array(F[names[1]][()])  # training labels
    X_test = np.array(F[names[2]][()])   # test sequences
    y_test = np.array(F[names[3]][()])   # test labels
class Metrics:
    """
    Necessary metrics to evaluate the model.

    All methods take integer class arrays: ``labels`` are the true classes,
    ``preds`` the predicted classes (e.g. argmax of softmax outputs).
    Functions(labels,preds):
    --- confusion_matrix
    --- accuracy_score
    --- accuracy
    """

    def confusion_matrix(self, labels, preds):
        """Return a pandas crosstab of actual vs. predicted classes."""
        label = pd.Series(labels, name='Actual')
        pred = pd.Series(preds, name='Predicted')
        return pd.crosstab(label, pred)

    def accuracy_score(self, labels, preds):
        """
        Percentage of entries where prediction matches the label.

        Vectorized (was an elementwise Python loop); guards against an
        empty input, which previously raised ZeroDivisionError.
        """
        labels = np.asarray(labels)
        preds = np.asarray(preds)
        if labels.shape[0] == 0:
            return 0.0
        return 100 * np.mean(labels == preds)

    def accuracy(self, labels, preds):
        """Percentage of entries where prediction matches the label."""
        return 100 * (labels == preds).mean()
# %%
class Activations:
    """
    Activation functions (and their derivatives) for the recurrent networks
    below (RNN/LSTM/GRU-style models).

    Several equivalent reference implementations are kept; the networks use
    ReLU / tanh / sigmoid / softmax plus the corresponding gradient helpers.
    """

    def relu_alternative(self, X):
        """ReLU via elementwise max."""
        return np.maximum(X, 0)

    def ReLU(self, X):
        """ReLU via (|x| + x) / 2 (branch-free form)."""
        return (abs(X) + X) / 2

    def relu_another(self, X):
        """ReLU via boolean masking."""
        return X * (X > 0)

    def tanh(self, X):
        """Hyperbolic tangent activation."""
        return np.tanh(X)

    def tanh_manuel(self, X):
        """tanh from exponentials (reference only; can overflow for large X)."""
        return (np.exp(X) - np.exp(-X)) / (np.exp(X) + np.exp(-X))

    def sigmoid(self, X):
        """Logistic activation; input clipped so exp() stays finite."""
        c = np.clip(X, -700, 700)
        return 1 / (1 + np.exp(-c))

    def softmax(self, X):
        """Numerically stable row-wise softmax (each row sums to 1)."""
        e_x = np.exp(X - np.max(X, axis=-1, keepdims=True))
        return e_x / np.sum(e_x, axis=-1, keepdims=True)

    def softmax_stable(self, X):
        """Global-max softmax variant (despite the name, less stable row-wise)."""
        e_x = np.exp(X - np.max(X))
        return e_x / np.sum(e_x)

    def ReLUDerivative(self, X):
        """ReLU derivative: 1 where X > 0, else 0."""
        return 1 * (X > 0)

    def ReLU_grad(self, X):
        """
        ReLU derivative: 1 where X > 0, else 0.

        BUGFIX: the previous version mutated ``X`` in place (corrupting the
        forward-pass caches passed to it during BPTT) and left values in
        (0, 1] unchanged instead of mapping them to 1.  Now side-effect free
        and consistent with ``dReLU`` / ``ReLUDerivative``.
        """
        return np.where(X > 0, 1, 0)

    def dReLU(self, X):
        """ReLU derivative: 1 where X > 0, else 0."""
        return np.where(X <= 0, 0, 1)

    def dtanh(self, X):
        """tanh derivative w.r.t. the pre-activation X."""
        return 1 - (np.tanh(X) ** 2)

    def dsigmoid(self, X):
        """Sigmoid derivative w.r.t. the pre-activation X."""
        return self.sigmoid(X) * (1 - self.sigmoid(X))

    def softmax_stable_gradient(self, soft_out):
        """Elementwise diagonal of the softmax Jacobian: s * (1 - s)."""
        return soft_out * (1 - soft_out)

    def softmax_grad(self, softmax):
        """Full softmax Jacobian from a softmax output vector."""
        s = softmax.reshape(-1, 1)
        return np.diagflat(s) - np.dot(s, s.T)

    def softmax_gradient(self, Sz):
        """Computes the gradient of the softmax function.

        z: (T, 1) array of input values where the gradient is computed. T is
        the number of output classes.
        Returns D (T, T), the Jacobian of softmax(z): D[i, j] = dS_i/dz_j.
        """
        # -SjSi via an outer product; add Si back on the diagonal (i == j).
        D = -np.outer(Sz, Sz) + np.diag(Sz.flatten())
        return D
# %%
class RNN(object):
    """
    Recurrent Neural Network for classifying human activity.

    Vanilla RNN: tanh recurrent hidden layer unrolled over ``seq_len`` time
    steps, affine + softmax readout; the classification loss is taken at the
    final time step.  Relies on module-level ``activations`` (Activations)
    and ``metrics`` (Metrics) instances.
    """

    def __init__(self, input_dim=3, hidden_dim=128, seq_len=150,
                 learning_rate=1e-1, mom_coeff=0.85, batch_size=32,
                 output_class=6):
        """
        Initialization of weights/biases and other configurable parameters.
        """
        np.random.seed(150)  # reproducible initialization
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        # Unfold case T = 150 :
        self.seq_len = seq_len
        self.output_class = output_class
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.mom_coeff = mom_coeff
        # Xavier uniform scaler :
        Xavier = lambda fan_in, fan_out: math.sqrt(6 / (fan_in + fan_out))
        lim_inp2hid = Xavier(self.input_dim, self.hidden_dim)
        self.W1 = np.random.uniform(-lim_inp2hid, lim_inp2hid, (self.input_dim, self.hidden_dim))
        self.B1 = np.random.uniform(-lim_inp2hid, lim_inp2hid, (1, self.hidden_dim))
        lim_hid2hid = Xavier(self.hidden_dim, self.hidden_dim)
        self.W1_rec = np.random.uniform(-lim_hid2hid, lim_hid2hid, (self.hidden_dim, self.hidden_dim))
        lim_hid2out = Xavier(self.hidden_dim, self.output_class)
        self.W2 = np.random.uniform(-lim_hid2out, lim_hid2out, (self.hidden_dim, self.output_class))
        self.B2 = np.random.uniform(-lim_inp2hid, lim_inp2hid, (1, self.output_class))
        # To keep track of loss and accuracy scores:
        self.train_loss, self.test_loss, self.train_acc, self.test_acc = [], [], [], []
        # Previous updates for classical momentum:
        self.prev_updates = {'W1': 0,
                             'B1': 0,
                             'W1_rec': 0,
                             'W2': 0,
                             'B2': 0}

    def forward(self, X) -> tuple:
        """Forward propagation of the RNN through time.

        Inputs:
        --- X is the batch, shaped (batch_size, seq_len, input_dim).
        Returns:
        --- (X_state, hidden_state, probs) as a tuple.
        ------ 1) X_state: input slice per time step
        ------ 2) hidden_state: hidden activations per time step
        ------ 3) probs: softmax class probabilities per time step
        """
        X_state = dict()
        hidden_state = dict()
        output_state = dict()
        probs = dict()
        self.h_prev_state = np.zeros((1, self.hidden_dim))
        hidden_state[-1] = np.copy(self.h_prev_state)  # initial state h_{-1}
        # Loop over time T = seq_len :
        for t in range(self.seq_len):
            # (batch_size, input_dim) slice at time t
            X_state[t] = X[:, t]
            # Recurrent hidden layer :
            hidden_state[t] = np.tanh(np.dot(X_state[t], self.W1) + np.dot(hidden_state[t - 1], self.W1_rec) + self.B1)
            output_state[t] = np.dot(hidden_state[t], self.W2) + self.B2
            # Per class probabilites :
            probs[t] = activations.softmax(output_state[t])
        return (X_state, hidden_state, probs)

    def BPTT(self, cache, Y):
        """
        Back propagation through time algorithm.

        Inputs:
        -- cache = (X_state, hidden_state, probs) from ``forward``
        -- Y = desired one-hot outputs
        Returns:
        -- Gradients w.r.t. all configurable elements (clipped to [-10, 10])
        """
        X_state, hidden_state, probs = cache
        last = self.seq_len - 1  # loss is taken at the final time step
        dW1, dW1_rec, dW2 = np.zeros_like(self.W1), np.zeros_like(self.W1_rec), np.zeros_like(self.W2)
        dB1, dB2 = np.zeros_like(self.B1), np.zeros_like(self.B2)
        dhnext = np.zeros_like(hidden_state[0])
        # Softmax + cross-entropy gradient at the last step: probs - onehot(Y)
        dy = np.copy(probs[last])
        dy[np.arange(len(Y)), np.argmax(Y, 1)] -= 1
        dB2 += np.sum(dy, axis=0, keepdims=True)
        dW2 += np.dot(hidden_state[last].T, dy)
        # NOTE(review): dy is re-applied through W2 at every step below,
        # mirroring the original implementation -- confirm this is intended.
        for t in reversed(range(1, self.seq_len)):
            dh = np.dot(dy, self.W2.T) + dhnext
            dhrec = (1 - (hidden_state[t] * hidden_state[t])) * dh  # tanh'
            dB1 += np.sum(dhrec, axis=0, keepdims=True)
            dW1 += np.dot(X_state[t].T, dhrec)
            dW1_rec += np.dot(hidden_state[t - 1].T, dhrec)
            dhnext = np.dot(dhrec, self.W1_rec.T)
        for grad in [dW1, dB1, dW1_rec, dW2, dB2]:
            np.clip(grad, -10, 10, out=grad)  # mitigate exploding gradients
        return [dW1, dB1, dW1_rec, dW2, dB2]

    def earlyStopping(self, ce_train, ce_val, ce_threshold, acc_train, acc_val, acc_threshold):
        """Stop when train/val loss or accuracy diverge beyond thresholds."""
        return ce_train - ce_val < ce_threshold or acc_train - acc_val > acc_threshold

    def CategoricalCrossEntropy(self, labels, preds):
        """
        Computes cross entropy between labels and model's predictions.
        """
        predictions = np.clip(preds, 1e-12, 1. - 1e-12)
        N = predictions.shape[0]
        return -np.sum(labels * np.log(predictions + 1e-9)) / N

    def step(self, grads, momentum=True):
        """
        SGD with classical momentum on mini batches; learning rate decays
        by 0.9999 per call.
        """
        if momentum:
            delta_W1 = -self.learning_rate * grads[0] + self.mom_coeff * self.prev_updates['W1']
            delta_B1 = -self.learning_rate * grads[1] + self.mom_coeff * self.prev_updates['B1']
            delta_W1_rec = -self.learning_rate * grads[2] + self.mom_coeff * self.prev_updates['W1_rec']
            delta_W2 = -self.learning_rate * grads[3] + self.mom_coeff * self.prev_updates['W2']
            delta_B2 = -self.learning_rate * grads[4] + self.mom_coeff * self.prev_updates['B2']
            self.W1 += delta_W1
            self.W1_rec += delta_W1_rec
            self.W2 += delta_W2
            self.B1 += delta_B1
            self.B2 += delta_B2
            self.prev_updates['W1'] = delta_W1
            self.prev_updates['W1_rec'] = delta_W1_rec
            self.prev_updates['W2'] = delta_W2
            self.prev_updates['B1'] = delta_B1
            self.prev_updates['B2'] = delta_B2
            self.learning_rate *= 0.9999

    def fit(self, X, Y, X_val, y_val, epochs=50, verbose=True, earlystopping=False):
        """
        Given the training dataset, labels and number of epochs, fit the
        model and measure performance on the validation set each epoch.
        """
        last = self.seq_len - 1
        for epoch in range(epochs):
            print(f'Epoch : {epoch + 1}')
            # BUGFIX: permutation size was hard-coded to 3000; use the
            # actual dataset size so other datasets work too.
            perm = np.random.permutation(X.shape[0])
            for i in range(round(X.shape[0] / self.batch_size)):
                batch_start = i * self.batch_size
                batch_finish = (i + 1) * self.batch_size
                index = perm[batch_start:batch_finish]
                X_feed = X[index]
                y_feed = Y[index]
                cache_train = self.forward(X_feed)
                grads = self.BPTT(cache_train, y_feed)
                self.step(grads)
            cross_loss_train = self.CategoricalCrossEntropy(y_feed, cache_train[2][last])
            predictions_train = self.predict(X)
            acc_train = metrics.accuracy(np.argmax(Y, 1), predictions_train)
            _, __, probs_test = self.forward(X_val)
            cross_loss_val = self.CategoricalCrossEntropy(y_val, probs_test[last])
            predictions_val = np.argmax(probs_test[last], 1)
            acc_val = metrics.accuracy(np.argmax(y_val, 1), predictions_val)
            if earlystopping:
                if self.earlyStopping(ce_train=cross_loss_train, ce_val=cross_loss_val, ce_threshold=3.0,
                                      acc_train=acc_train, acc_val=acc_val, acc_threshold=15):
                    break
            if verbose:
                print(f"[{epoch + 1}/{epochs}] ------> Training :  Accuracy : {acc_train}")
                print(f"[{epoch + 1}/{epochs}] ------> Training :  Loss     : {cross_loss_train}")
                print('______________________________________________________________________________________\n')
                print(f"[{epoch + 1}/{epochs}] ------> Testing  :  Accuracy : {acc_val}")
                print(f"[{epoch + 1}/{epochs}] ------> Testing  :  Loss     : {cross_loss_val}")
                print('______________________________________________________________________________________\n')
            self.train_loss.append(cross_loss_train)
            self.test_loss.append(cross_loss_val)
            self.train_acc.append(acc_train)
            self.test_acc.append(acc_val)

    def predict(self, X):
        """Return the predicted class index for each sample in X."""
        _, __, probs = self.forward(X)
        return np.argmax(probs[self.seq_len - 1], axis=1)

    def history(self):
        """Return the recorded per-epoch loss/accuracy curves."""
        return {'TrainLoss': self.train_loss,
                'TrainAcc': self.train_acc,
                'TestLoss': self.test_loss,
                'TestAcc': self.test_acc}
# %%
input_dim = 3
activations = Activations()
metrics = Metrics()
model = RNN(input_dim=input_dim, learning_rate=1e-4, mom_coeff=0.0, hidden_dim=128)
# %%
model.fit(X_train, y_train, X_test, y_test, epochs=35)
# %%
history = model.history()
# %%
plt.figure()
plt.plot(history['TestLoss'], '-o')
plt.plot(history['TrainLoss'], '-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Categorical Cross Entropy over epochs')
plt.legend(['Test Loss', 'Train Loss'])
plt.show()
# %%
plt.figure()
plt.plot(history['TestAcc'], '-o')
plt.plot(history['TrainAcc'], '-o')
plt.xlabel('# of epochs')
plt.ylabel('Accuracy')  # BUGFIX: axis was mislabeled 'Loss'
plt.title('Accuracy over epochs')
plt.legend(['Test Acc', 'Train Acc'])
plt.show()
# %%
train_preds = model.predict(X_train)
test_preds = model.predict(X_test)
# %%
confusion_mat_train = metrics.confusion_matrix(np.argmax(y_train, 1), train_preds)
confusion_mat_test = metrics.confusion_matrix(np.argmax(y_test, 1), test_preds)
# %%
body_movements = ['downstairs', 'jogging', 'sitting', 'standing', 'upstairs', 'walking']
# BUGFIX: keep real copies before relabeling (plain assignment only aliased
# the same DataFrames, so the "backups" were relabeled too).
confusion_mat_train_, confusion_mat_test_ = confusion_mat_train.copy(), confusion_mat_test.copy()
confusion_mat_train.columns = body_movements
confusion_mat_train.index = body_movements
print(confusion_mat_train)
# %%
confusion_mat_test.columns = body_movements
confusion_mat_test.index = body_movements
# BUGFIX: this cell printed the *train* matrix instead of the test matrix.
print(confusion_mat_test)
# %%
sns.heatmap(confusion_mat_train / np.sum(confusion_mat_train), annot=True,
            fmt='.2%', cmap='Blues')
plt.show()
# %%
print(confusion_mat_test)
# %%
sns.heatmap(confusion_mat_test / np.sum(confusion_mat_test), annot=True,
            fmt='.2%', cmap='Blues')
plt.show()
# %%
plt.matshow(confusion_mat_test, cmap=plt.cm.gray_r)
plt.title('Testing Confusion Matrix')
plt.colorbar()
tick_marks = np.arange(len(confusion_mat_test.columns))
plt.xticks(tick_marks, confusion_mat_test.columns, rotation=45)
plt.yticks(tick_marks, confusion_mat_test.index)
plt.tight_layout()
plt.ylabel(confusion_mat_test.index.name)
plt.xlabel(confusion_mat_test.columns.name)
plt.show()
# %%
plt.matshow(confusion_mat_train, cmap=plt.cm.gray_r)
plt.title('Training Confusion Matrix')
plt.colorbar()
tick_marks = np.arange(len(confusion_mat_train.columns))
plt.xticks(tick_marks, confusion_mat_train.columns, rotation=45)
plt.yticks(tick_marks, confusion_mat_train.index)
plt.tight_layout()
plt.ylabel(confusion_mat_train.index.name)
plt.xlabel(confusion_mat_train.columns.name)
plt.show()
# %%
sns.heatmap(confusion_mat_test / np.sum(confusion_mat_test), annot=True,
            fmt='.2%', cmap='Greens')
plt.show()
# %%
sns.heatmap(confusion_mat_test / np.sum(confusion_mat_test), annot=True,
            fmt='.2%', cmap='Blues')
plt.show()
class Multi_Layer_RNN(object):
    """
    Recurrent Neural Network for classifying human activity.

    Like ``RNN`` but with an extra ReLU MLP layer between the recurrent
    hidden state and the softmax readout.  Relies on module-level
    ``activations`` and ``metrics`` instances.
    """

    def __init__(self, input_dim=3, hidden_dim_1=128, hidden_dim_2=64,
                 seq_len=150, learning_rate=1e-1, mom_coeff=0.85,
                 batch_size=32, output_class=6):
        """
        Initialization of weights/biases and other configurable parameters.
        """
        np.random.seed(150)  # reproducible initialization
        self.input_dim = input_dim
        self.hidden_dim_1 = hidden_dim_1
        self.hidden_dim_2 = hidden_dim_2
        # Unfold case T = 150 :
        self.seq_len = seq_len
        self.output_class = output_class
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.mom_coeff = mom_coeff
        # Xavier uniform scaler :
        Xavier = lambda fan_in, fan_out: math.sqrt(6 / (fan_in + fan_out))
        lim_inp2hid = Xavier(self.input_dim, self.hidden_dim_1)
        self.W1 = np.random.uniform(-lim_inp2hid, lim_inp2hid, (self.input_dim, self.hidden_dim_1))
        self.B1 = np.random.uniform(-lim_inp2hid, lim_inp2hid, (1, self.hidden_dim_1))
        lim_hid2hid = Xavier(self.hidden_dim_1, self.hidden_dim_1)
        self.W1_rec = np.random.uniform(-lim_hid2hid, lim_hid2hid, (self.hidden_dim_1, self.hidden_dim_1))
        lim_hid2hid2 = Xavier(self.hidden_dim_1, self.hidden_dim_2)
        self.W2 = np.random.uniform(-lim_hid2hid2, lim_hid2hid2, (self.hidden_dim_1, self.hidden_dim_2))
        self.B2 = np.random.uniform(-lim_hid2hid2, lim_hid2hid2, (1, self.hidden_dim_2))
        lim_hid2out = Xavier(self.hidden_dim_2, self.output_class)
        self.W3 = np.random.uniform(-lim_hid2out, lim_hid2out, (self.hidden_dim_2, self.output_class))
        self.B3 = np.random.uniform(-lim_inp2hid, lim_inp2hid, (1, self.output_class))
        # To keep track of loss and accuracy scores:
        self.train_loss, self.test_loss, self.train_acc, self.test_acc = [], [], [], []
        # Previous updates for classical momentum:
        self.prev_updates = {'W1': 0,
                             'B1': 0,
                             'W1_rec': 0,
                             'W2': 0,
                             'B2': 0,
                             'W3': 0,
                             'B3': 0}

    def forward(self, X) -> tuple:
        """
        Forward propagation of the RNN through time.
        __________________________________________________________
        Inputs:
        --- X is the batch, shaped (batch_size, seq_len, input_dim).
        __________________________________________________________
        Returns:
        --- (X_state, hidden_state_1, mlp_linear, hidden_state_mlp, probs)
        ------ 1) X_state: input slice per time step
        ------ 2) hidden_state_1: recurrent hidden activations per step
        ------ 3) mlp_linear: MLP pre-activations per step
        ------ 4) hidden_state_mlp: MLP ReLU activations per step
        ------ 5) probs: softmax class probabilities per step
        __________________________________________________________
        """
        X_state = dict()
        hidden_state_1 = dict()
        hidden_state_mlp = dict()
        output_state = dict()
        probs = dict()
        mlp_linear = dict()
        self.h_prev_state = np.zeros((1, self.hidden_dim_1))
        hidden_state_1[-1] = np.copy(self.h_prev_state)  # initial state
        # Loop over time T = seq_len :
        for t in range(self.seq_len):
            # (batch_size, input_dim) slice at time t
            X_state[t] = X[:, t]
            # Recurrent hidden layer :
            hidden_state_1[t] = np.tanh(np.dot(X_state[t], self.W1) + np.dot(hidden_state_1[t - 1], self.W1_rec) + self.B1)
            mlp_linear[t] = np.dot(hidden_state_1[t], self.W2) + self.B2
            hidden_state_mlp[t] = activations.ReLU(mlp_linear[t])
            output_state[t] = np.dot(hidden_state_mlp[t], self.W3) + self.B3
            # Per class probabilites :
            probs[t] = activations.softmax(output_state[t])
        return (X_state, hidden_state_1, mlp_linear, hidden_state_mlp, probs)

    def BPTT(self, cache, Y):
        """
        Back propagation through time algorithm.

        Inputs:
        -- cache = output tuple of ``forward``
        -- Y = desired one-hot outputs
        Returns:
        -- Gradients w.r.t. all configurable elements (clipped to [-10, 10])
        """
        X_state, hidden_state_1, mlp_linear, hidden_state_mlp, probs = cache
        last = self.seq_len - 1  # loss taken at the final time step
        dW1, dW1_rec, dW2, dW3 = (np.zeros_like(self.W1), np.zeros_like(self.W1_rec),
                                  np.zeros_like(self.W2), np.zeros_like(self.W3))
        dB1, dB2, dB3 = np.zeros_like(self.B1), np.zeros_like(self.B2), np.zeros_like(self.B3)
        dhnext = np.zeros_like(hidden_state_1[0])
        # Softmax + cross-entropy gradient at the last step: probs - onehot(Y)
        dy = np.copy(probs[last])
        dy[np.arange(len(Y)), np.argmax(Y, 1)] -= 1
        dW3 += np.dot(hidden_state_mlp[last].T, dy)
        dB3 += np.sum(dy, axis=0, keepdims=True)
        dy1 = np.dot(dy, self.W3.T) * activations.ReLU_grad(mlp_linear[last])
        dB2 += np.sum(dy1, axis=0, keepdims=True)
        dW2 += np.dot(hidden_state_1[last].T, dy1)
        for t in reversed(range(1, self.seq_len)):
            dh = np.dot(dy1, self.W2.T) + dhnext
            dhrec = (1 - (hidden_state_1[t] * hidden_state_1[t])) * dh  # tanh'
            dB1 += np.sum(dhrec, axis=0, keepdims=True)
            dW1 += np.dot(X_state[t].T, dhrec)
            dW1_rec += np.dot(hidden_state_1[t - 1].T, dhrec)
            dhnext = np.dot(dhrec, self.W1_rec.T)
        for grad in [dW1, dB1, dW1_rec, dW2, dB2, dW3, dB3]:
            np.clip(grad, -10, 10, out=grad)  # mitigate exploding gradients
        return [dW1, dB1, dW1_rec, dW2, dB2, dW3, dB3]

    def CategoricalCrossEntropy(self, labels, preds):
        """
        Computes cross entropy between labels and model's predictions.
        """
        predictions = np.clip(preds, 1e-12, 1. - 1e-12)
        N = predictions.shape[0]
        return -np.sum(labels * np.log(predictions + 1e-9)) / N

    def step(self, grads, momentum=True):
        """
        SGD with classical momentum on mini batches.

        BUGFIX: the momentum term was *subtracted* here (unlike ``RNN.step``
        and standard classical momentum); it is now added, consistent with
        the sibling class.
        """
        if momentum:
            delta_W1 = -self.learning_rate * grads[0] + self.mom_coeff * self.prev_updates['W1']
            delta_B1 = -self.learning_rate * grads[1] + self.mom_coeff * self.prev_updates['B1']
            delta_W1_rec = -self.learning_rate * grads[2] + self.mom_coeff * self.prev_updates['W1_rec']
            delta_W2 = -self.learning_rate * grads[3] + self.mom_coeff * self.prev_updates['W2']
            delta_B2 = -self.learning_rate * grads[4] + self.mom_coeff * self.prev_updates['B2']
            delta_W3 = -self.learning_rate * grads[5] + self.mom_coeff * self.prev_updates['W3']
            delta_B3 = -self.learning_rate * grads[6] + self.mom_coeff * self.prev_updates['B3']
            self.W1 += delta_W1
            self.W1_rec += delta_W1_rec
            self.W2 += delta_W2
            self.B1 += delta_B1
            self.B2 += delta_B2
            self.W3 += delta_W3
            self.B3 += delta_B3
            self.prev_updates['W1'] = delta_W1
            self.prev_updates['W1_rec'] = delta_W1_rec
            self.prev_updates['W2'] = delta_W2
            self.prev_updates['B1'] = delta_B1
            self.prev_updates['B2'] = delta_B2
            self.prev_updates['W3'] = delta_W3
            self.prev_updates['B3'] = delta_B3
            self.learning_rate *= 0.9999

    def fit(self, X, Y, X_val, y_val, epochs=50, verbose=True, crossVal=False):
        """
        Given the training dataset, labels and number of epochs, fit the
        model and measure performance on the validation set each epoch.
        """
        last = self.seq_len - 1
        for epoch in range(epochs):
            print(f'Epoch : {epoch + 1}')
            # BUGFIX: permutation size was hard-coded to 3000.
            perm = np.random.permutation(X.shape[0])
            for i in range(round(X.shape[0] / self.batch_size)):
                batch_start = i * self.batch_size
                batch_finish = (i + 1) * self.batch_size
                index = perm[batch_start:batch_finish]
                X_feed = X[index]
                y_feed = Y[index]
                cache_train = self.forward(X_feed)
                grads = self.BPTT(cache_train, y_feed)
                self.step(grads)
            if crossVal:
                # BUGFIX: previously referenced undefined ``val_X``/``val_Y``.
                # NOTE(review): ``cross_validation`` is not defined on this
                # class -- confirm it is provided elsewhere before enabling.
                stop = self.cross_validation(X, X_val, Y, y_val, threshold=5)
                if stop:
                    break
            cross_loss_train = self.CategoricalCrossEntropy(y_feed, cache_train[4][last])
            predictions_train = self.predict(X)
            acc_train = metrics.accuracy(np.argmax(Y, 1), predictions_train)
            _, __, ___, ____, probs_test = self.forward(X_val)
            cross_loss_val = self.CategoricalCrossEntropy(y_val, probs_test[last])
            predictions_val = np.argmax(probs_test[last], 1)
            acc_val = metrics.accuracy(np.argmax(y_val, 1), predictions_val)
            if verbose:
                print(f"[{epoch + 1}/{epochs}] ------> Training :  Accuracy : {acc_train}")
                print(f"[{epoch + 1}/{epochs}] ------> Training :  Loss     : {cross_loss_train}")
                print('______________________________________________________________________________________\n')
                print(f"[{epoch + 1}/{epochs}] ------> Testing  :  Accuracy : {acc_val}")
                print(f"[{epoch + 1}/{epochs}] ------> Testing  :  Loss     : {cross_loss_val}")
                print('______________________________________________________________________________________\n')
            self.train_loss.append(cross_loss_train)
            self.test_loss.append(cross_loss_val)
            self.train_acc.append(acc_train)
            self.test_acc.append(acc_val)

    def predict(self, X):
        """Return the predicted class index for each sample in X."""
        _, __, ___, ____, probs = self.forward(X)
        return np.argmax(probs[self.seq_len - 1], axis=1)

    def history(self):
        """Return the recorded per-epoch loss/accuracy curves."""
        return {'TrainLoss': self.train_loss,
                'TrainAcc': self.train_acc,
                'TestLoss': self.test_loss,
                'TestAcc': self.test_acc}
# %%
multilayer_rnn = Multi_Layer_RNN(learning_rate=1e-4, mom_coeff=0.0, hidden_dim_1=128, hidden_dim_2=64)
# %%
multilayer_rnn.fit(X_train, y_train, X_test, y_test, epochs=35)
# %%
multilayer_rnn_history = multilayer_rnn.history()
# %%
plt.figure()
plt.plot(multilayer_rnn_history['TestLoss'], '-o')
plt.plot(multilayer_rnn_history['TrainLoss'], '-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Categorical Cross Entropy over epochs')
plt.legend(['Test Loss', 'Train Loss'])
plt.show()
# %%
plt.figure()
plt.plot(multilayer_rnn_history['TestAcc'], '-o')
plt.plot(multilayer_rnn_history['TrainAcc'], '-o')
plt.xlabel('# of epochs')
plt.ylabel('Accuracy')  # BUGFIX: axis was mislabeled 'Loss'
plt.title('Accuracy over epochs')
plt.legend(['Test Acc', 'Train Acc'])
plt.show()
# %%
plt.figure()
plt.plot(multilayer_rnn_history['TrainAcc'], '-o')
plt.plot(history['TrainAcc'], '-o')
plt.xlabel('# of epochs')
plt.ylabel('Accuracy')  # BUGFIX: axis was mislabeled 'Loss'
plt.title('Training Accuracy over epochs')
plt.legend(['Multi Layer RNN', 'Vanilla RNN'])
plt.show()
# %%
plt.plot(multilayer_rnn_history['TestAcc'], '-o')
plt.plot(history['TestAcc'], '-o')
plt.xlabel('# of epochs')
plt.ylabel('Accuracy')  # BUGFIX: axis was mislabeled 'Loss'
plt.title('Testing Accuracy over epochs')
plt.legend(['Multi Layer RNN', 'Vanilla RNN'])
plt.show()
# %%
train_preds_multilayer_rnn = multilayer_rnn.predict(X_train)
test_preds_multilayer_rnn = multilayer_rnn.predict(X_test)
confusion_mat_train_multilayer_rnn = metrics.confusion_matrix(np.argmax(y_train, 1), train_preds_multilayer_rnn)
confusion_mat_test_multilayer_rnn = metrics.confusion_matrix(np.argmax(y_test, 1), test_preds_multilayer_rnn)
body_movements = ['downstairs', 'jogging', 'sitting', 'standing', 'upstairs', 'walking']
confusion_mat_train_multilayer_rnn.columns = body_movements
confusion_mat_train_multilayer_rnn.index = body_movements
confusion_mat_test_multilayer_rnn.columns = body_movements
confusion_mat_test_multilayer_rnn.index = body_movements
print(confusion_mat_train_multilayer_rnn)
# %%
print(confusion_mat_test_multilayer_rnn)
# %%
sns.heatmap(confusion_mat_test_multilayer_rnn / np.sum(confusion_mat_test_multilayer_rnn), annot=True,
            fmt='.2%', cmap='Blues')
plt.show()
# %%
sns.heatmap(confusion_mat_train_multilayer_rnn / np.sum(confusion_mat_train_multilayer_rnn), annot=True,
            fmt='.2%', cmap='Blues')
plt.show()
class Three_Hidden_Layer_RNN(object):
"""
Recurrent Neural Network for classifying human activity.
RNN encapsulates all necessary logic for training the network.
"""
def __init__(self,input_dim = 3,hidden_dim_1 = 128, hidden_dim_2 = 64,hidden_dim_3 = 32, seq_len = 150, learning_rate = 1e-1, mom_coeff = 0.85, batch_size = 32, output_class = 6):
"""
Initialization of weights/biases and other configurable parameters.
"""
np.random.seed(150)
self.input_dim = input_dim
self.hidden_dim_1 = hidden_dim_1
self.hidden_dim_2 = hidden_dim_2
self.hidden_dim_3 = hidden_dim_3
# Unfold case T = 150 :
self.seq_len = seq_len
self.output_class = output_class
self.learning_rate = learning_rate
self.batch_size = batch_size
self.mom_coeff = mom_coeff
# Xavier uniform scaler :
Xavier = lambda fan_in,fan_out : math.sqrt(6/(fan_in + fan_out))
lim_inp2hid = Xavier(self.input_dim,self.hidden_dim_1)
self.W1 = np.random.uniform(-lim_inp2hid,lim_inp2hid,(self.input_dim,self.hidden_dim_1))
self.B1 = np.random.uniform(-lim_inp2hid,lim_inp2hid,(1,self.hidden_dim_1))
lim_hid2hid = Xavier(self.hidden_dim_1,self.hidden_dim_1)
self.W1_rec= np.random.uniform(-lim_hid2hid,lim_hid2hid,(self.hidden_dim_1,self.hidden_dim_1))
lim_hid2hid2 = Xavier(self.hidden_dim_1,self.hidden_dim_2)
self.W2 = np.random.uniform(-lim_hid2hid2,lim_hid2hid2,(self.hidden_dim_1,self.hidden_dim_2))
self.B2 = np.random.uniform(-lim_hid2hid2,lim_hid2hid2,(1,self.hidden_dim_2))
lim_hid2hid3 = Xavier(self.hidden_dim_2,self.hidden_dim_3)
self.W3 = np.random.uniform(-lim_hid2hid3,lim_hid2hid3,(self.hidden_dim_2,self.hidden_dim_3))
self.B3 = np.random.uniform(-lim_hid2hid3,lim_hid2hid3,(1,self.hidden_dim_3))
lim_hid2out = Xavier(self.hidden_dim_3,self.output_class)
self.W4 = np.random.uniform(-lim_hid2out,lim_hid2out,(self.hidden_dim_3,self.output_class))
self.B4 = np.random.uniform(-lim_hid2out,lim_hid2out,(1,self.output_class))
# To keep track loss and accuracy score :
self.train_loss,self.test_loss,self.train_acc,self.test_acc = [],[],[],[]
# Storing previous momentum updates :
self.prev_updates = {'W1' : 0,
'B1' : 0,
'W1_rec' : 0,
'W2' : 0,
'B2' : 0,
'W3' : 0,
'W4' : 0,
'B3' : 0,
'B4' : 0}
def forward(self,X) -> tuple:
"""
Forward propagation of the RNN through time.
__________________________________________________________
Inputs:
--- X is the bacth.
--- h_prev_state is the previous state of the hidden layer.
__________________________________________________________
Returns:
--- (X_state,hidden_state,probs) as a tuple.
------ 1) X_state is the input across all time steps
------ 2) hidden_state is the hidden stages across time
------ 3) probs is the probabilities of each outputs, i.e. outputs of softmax
__________________________________________________________
"""
X_state = dict()
hidden_state_1 = dict()
hidden_state_mlp = dict()
hidden_state_mlp_2 = dict()
output_state = dict()
probs = dict()
mlp_linear = dict()
mlp_linear_2 = dict()
self.h_prev_state = np.zeros((1,self.hidden_dim_1))
hidden_state_1[-1] = np.copy(self.h_prev_state)
# Loop over time T = 150 :
for t in range(self.seq_len):
# Selecting first record with 3 inputs, dimension = (batch_size,input_size)
X_state[t] = X[:,t]
# Recurrent hidden layer :
hidden_state_1[t] = np.tanh(np.dot(X_state[t],self.W1) + np.dot(hidden_state_1[t-1],self.W1_rec) + self.B1)
mlp_linear[t] = np.dot(hidden_state_1[t],self.W2) + self.B2
hidden_state_mlp[t] = activations.ReLU(mlp_linear[t])
mlp_linear_2[t] = np.dot(hidden_state_mlp[t],self.W3) + self.B3
hidden_state_mlp_2[t] = activations.ReLU(mlp_linear_2[t])
output_state[t] = np.dot(hidden_state_mlp_2[t],self.W4) + self.B4
# Per class probabilites :
probs[t] = activations.softmax(output_state[t])
return (X_state,hidden_state_1,mlp_linear,hidden_state_mlp,mlp_linear_2,hidden_state_mlp_2,probs)
def BPTT(self,cache,Y):
    """
    Back-propagation through time for the 3-hidden-layer RNN.

    Inputs:
    -- cache : tuple produced by forward():
       (X_state, hidden_state_1, mlp_linear, hidden_state_mlp,
        mlp_linear_2, hidden_state_mlp_2, probs), each a dict keyed by timestep.
    -- Y : one-hot labels, shape (batch, n_classes).
    Returns:
    -- Gradients in the order [dW1,dB1,dW1_rec,dW2,dB2,dW3,dB3,dW4,dB4],
       each clipped elementwise to [-10, 10].
    """
    X_state,hidden_state_1,mlp_linear,hidden_state_mlp,mlp_linear_2,hidden_state_mlp_2,probs = cache
    # backward pass: compute gradients going backwards
    dW1, dW1_rec, dW2, dW3, dW4 = np.zeros_like(self.W1), np.zeros_like(self.W1_rec), np.zeros_like(self.W2),np.zeros_like(self.W3),np.zeros_like(self.W4)
    dB1, dB2,dB3,dB4 = np.zeros_like(self.B1), np.zeros_like(self.B2),np.zeros_like(self.B3),np.zeros_like(self.B4)
    # Gradient flowing into the recurrent hidden state from the future timestep.
    dhnext = np.zeros_like(hidden_state_1[0])
    # NOTE(review): range starts at 1, so timestep 0 never contributes — confirm intended.
    for t in reversed(range(1,self.seq_len)):
        # Softmax + cross-entropy gradient: probs - one_hot(Y).
        dy = np.copy(probs[t])
        dy[np.arange(len(Y)),np.argmax(Y,1)] -= 1
        #dy = probs[0] - Y[0]
        # Output layer (W4 / B4).
        dW4 += np.dot(hidden_state_mlp_2[t].T,dy)
        dB4 += np.sum(dy,axis = 0, keepdims = True)
        # Second ReLU MLP layer (W3 / B3).
        dy1 = np.dot(dy,self.W4.T) * activations.ReLU_grad(mlp_linear_2[t])
        dW3 += np.dot(hidden_state_mlp[t].T,dy1)
        dB3 += np.sum(dy1,axis = 0, keepdims = True)
        # First ReLU MLP layer (W2 / B2).
        dy2 = np.dot(dy1,self.W3.T) * activations.ReLU_grad(mlp_linear[t])
        dB2 += np.sum(dy2,axis = 0, keepdims = True)
        dW2 += np.dot(hidden_state_1[t].T,dy2)
        # Recurrent tanh layer: add the gradient arriving from the future (dhnext).
        dh = np.dot(dy2,self.W2.T) + dhnext
        # tanh'(a) = 1 - tanh(a)^2, expressed via the cached activation.
        dhrec = (1 - (hidden_state_1[t] * hidden_state_1[t])) * dh
        dB1 += np.sum(dhrec,axis = 0, keepdims = True)
        dW1 += np.dot(X_state[t].T,dhrec)
        dW1_rec += np.dot(hidden_state_1[t-1].T,dhrec)
        dhnext = np.dot(dhrec,self.W1_rec.T)
    # Clip elementwise to mitigate exploding gradients.
    for grad in [dW1,dB1,dW1_rec,dW2,dB2,dW3,dB3,dW4,dB4]:
        np.clip(grad, -10, 10, out = grad)
    return [dW1,dB1,dW1_rec,dW2,dB2,dW3,dB3,dW4,dB4]
def CategoricalCrossEntropy(self,labels,preds):
    """
    Mean categorical cross-entropy between one-hot ``labels`` and
    predicted probabilities ``preds`` (averaged over the batch axis).
    """
    # Clip away exact 0/1 so the logarithm stays finite.
    clipped = np.clip(preds, 1e-12, 1. - 1e-12)
    batch = clipped.shape[0]
    log_likelihood = labels * np.log(clipped + 1e-9)
    return -np.sum(log_likelihood) / batch
def step(self,grads,momentum = True):
    """
    Apply one gradient-descent update to all parameters.

    Inputs:
    -- grads    : gradients in the order [dW1,dB1,dW1_rec,dW2,dB2,dW3,dB3,dW4,dB4]
                  (as returned by BPTT).
    -- momentum : if True, use heavy-ball momentum with ``self.mom_coeff``
                  and remember each update in ``self.prev_updates``;
                  otherwise do a plain SGD step.
                  Fix: previously ``momentum=False`` silently skipped the
                  update entirely (the vanilla path was commented out).
    Side effects: mutates the weight/bias attributes, ``self.prev_updates``
    and decays ``self.learning_rate`` by a factor of 0.9999 per call.
    """
    param_names = ['W1','B1','W1_rec','W2','B2','W3','B3','W4','B4']
    if momentum:
        for name, grad in zip(param_names, grads):
            # delta = -lr * grad + mu * previous_delta  (heavy-ball momentum)
            delta = -self.learning_rate * grad + self.mom_coeff * self.prev_updates[name]
            setattr(self, name, getattr(self, name) + delta)
            self.prev_updates[name] = delta
    else:
        # Vanilla SGD: param <- param - lr * grad (no momentum bookkeeping).
        for name, grad in zip(param_names, grads):
            setattr(self, name, getattr(self, name) - self.learning_rate * grad)
    # Multiplicative learning-rate decay, as in the original momentum path.
    self.learning_rate *= 0.9999
def fit(self,X,Y,X_val,y_val,epochs = 50 ,verbose = True, crossVal = False):
    """
    Train with shuffled mini-batch SGD and record per-epoch metrics.

    Inputs:
    -- X, Y         : training inputs and one-hot labels.
    -- X_val, y_val : validation inputs and one-hot labels.
    -- epochs       : number of passes over the training set.
    -- verbose      : print accuracy/loss each epoch when True.
    -- crossVal     : early-stopping hook (requires a ``cross_validation``
                      method on the instance).
    Side effects: appends to self.train_loss/test_loss/train_acc/test_acc.
    """
    for epoch in range(epochs):
        print(f'Epoch : {epoch + 1}')
        # Fix: shuffle over the actual dataset size (was hard-coded 3000).
        perm = np.random.permutation(X.shape[0])
        for i in range(round(X.shape[0]/self.batch_size)):
            # Slice this mini-batch out of the shuffled index order.
            batch_start = i * self.batch_size
            batch_finish = (i+1) * self.batch_size
            index = perm[batch_start:batch_finish]
            X_feed = X[index]
            y_feed = Y[index]
            # Forward pass + backprop-through-time + parameter update.
            cache_train = self.forward(X_feed)
            grads = self.BPTT(cache_train,y_feed)
            self.step(grads)
            if crossVal:
                # Fix: previously referenced undefined names val_X / val_Y.
                stop = self.cross_validation(X,X_val,Y,y_val,threshold = 5)
                if stop:
                    break
        # Training metrics: loss on the last mini-batch, accuracy on the full set.
        # cache_train[6] holds per-timestep softmax outputs; use the final timestep
        # (generalized from the hard-coded index 149).
        cross_loss_train = self.CategoricalCrossEntropy(y_feed,cache_train[6][self.seq_len - 1])
        predictions_train = self.predict(X)
        acc_train = metrics.accuracy(np.argmax(Y,1),predictions_train)
        # Validation metrics at the final timestep.
        _,__,___,____,_____,______, probs_test = self.forward(X_val)
        cross_loss_val = self.CategoricalCrossEntropy(y_val,probs_test[self.seq_len - 1])
        predictions_val = np.argmax(probs_test[self.seq_len - 1],1)
        acc_val = metrics.accuracy(np.argmax(y_val,1),predictions_val)
        if verbose:
            print(f"[{epoch + 1}/{epochs}] ------> Training : Accuracy : {acc_train}")
            print(f"[{epoch + 1}/{epochs}] ------> Training : Loss : {cross_loss_train}")
            print('______________________________________________________________________________________\n')
            print(f"[{epoch + 1}/{epochs}] ------> Testing : Accuracy : {acc_val}")
            print(f"[{epoch + 1}/{epochs}] ------> Testing : Loss : {cross_loss_val}")
            print('______________________________________________________________________________________\n')
        self.train_loss.append(cross_loss_train)
        self.test_loss.append(cross_loss_val)
        self.train_acc.append(acc_train)
        self.test_acc.append(acc_val)
def predict(self,X):
    """
    Return class-index predictions (not one-hot) for batch ``X``,
    taken from the softmax output at the final timestep.
    """
    _,__,___,____,_____,______,probs = self.forward(X)
    # Generalized: use the last timestep instead of the hard-coded index 149.
    return np.argmax(probs[self.seq_len - 1],axis=1)
def history(self):
    """
    Return the recorded training/validation loss and accuracy curves
    as a dict keyed by metric name.
    """
    curves = {
        'TrainLoss': self.train_loss,
        'TrainAcc': self.train_acc,
        'TestLoss': self.test_loss,
        'TestAcc': self.test_acc,
    }
    return curves
# %%
# Train three configurations of the 3-hidden-layer RNN.
three_layer_rnn = Three_Hidden_Layer_RNN(hidden_dim_1 = 128, hidden_dim_2 = 64,hidden_dim_3 = 32, learning_rate = 1e-4, mom_coeff = 0.0, batch_size = 32, output_class = 6)
# %%
three_layer_rnn.fit(X_train,y_train,X_test,y_test,epochs=15)
# %%
# Variant 1: same architecture, halved learning rate.
three_layer_rnn_v1 = Three_Hidden_Layer_RNN(hidden_dim_1 = 128, hidden_dim_2 = 64,hidden_dim_3 = 32, learning_rate = 5e-5, mom_coeff = 0.0, batch_size = 32, output_class = 6)
three_layer_rnn_v1.fit(X_train,y_train,X_test,y_test,epochs=15)
# %%
# NOTE(review): v2 repeats the exact hyperparameters of three_layer_rnn — confirm intended.
three_layer_rnn_v2 = Three_Hidden_Layer_RNN(hidden_dim_1 = 128, hidden_dim_2 = 64,hidden_dim_3 = 32, learning_rate = 1e-4, mom_coeff = 0.0, batch_size = 32, output_class = 6)
three_layer_rnn_v2.fit(X_train,y_train,X_test,y_test,epochs=15)
# %%
# Loss curves (train vs. test) over epochs.
three_layer_rnn_history = three_layer_rnn.history()
plt.figure()
plt.plot(three_layer_rnn_history['TestLoss'],'-o')
plt.plot(three_layer_rnn_history['TrainLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Categorical Cross Entropy over epochs')
plt.legend(['Test Loss','Train Loss'])
plt.show()
# %%
# Accuracy curves (train vs. test) over epochs.
plt.figure()
plt.plot(three_layer_rnn_history['TestAcc'],'-o')
plt.plot(three_layer_rnn_history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Accuracy over epochs')
plt.legend(['Test Acc','Train Acc'])
plt.show()
# %%
# Compare training accuracy against models from earlier cells
# (multilayer_rnn_history and history are defined elsewhere in the notebook).
plt.figure()
plt.plot(three_layer_rnn_history['TrainAcc'],'-o')
plt.plot(multilayer_rnn_history['TrainAcc'],'-o')
plt.plot(history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Accuracy over epochs')
plt.legend(['3 hidden layer Rnn','Multi Layer RNN','Vanilla RNN'])
plt.show()
# %%
# Same comparison on test accuracy.
plt.figure()
plt.plot(three_layer_rnn_history['TestAcc'],'-o')
plt.plot(multilayer_rnn_history['TestAcc'],'-o')
plt.plot(history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Accuracy over epochs')
plt.legend(['3 hidden layer Rnn','Multi Layer RNN','Vanilla RNN'])
plt.show()
# %%
# Confusion matrices for train/test predictions, labelled with activity names.
# NOTE(review): assigning .columns/.index assumes metrics.confusion_matrix
# returns a pandas DataFrame — confirm against the metrics module.
train_preds_three_layer_rnn_history = three_layer_rnn.predict(X_train)
test_preds_three_layer_rnn_history = three_layer_rnn.predict(X_test)
confusion_mat_train_three_layer_rnn_history = metrics.confusion_matrix(np.argmax(y_train,1),train_preds_three_layer_rnn_history)
confusion_mat_test_three_layer_rnn_history = metrics.confusion_matrix(np.argmax(y_test,1),test_preds_three_layer_rnn_history)
body_movements = ['downstairs','jogging','sitting','standing','upstairs','walking']
confusion_mat_train_three_layer_rnn_history.columns = body_movements
confusion_mat_train_three_layer_rnn_history.index = body_movements
confusion_mat_test_three_layer_rnn_history.columns = body_movements
confusion_mat_test_three_layer_rnn_history.index = body_movements
print(confusion_mat_train_three_layer_rnn_history)
# %%
# Heatmap of the test confusion matrix, normalized to percentages.
sns.heatmap(confusion_mat_test_three_layer_rnn_history/np.sum(confusion_mat_test_three_layer_rnn_history), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
# %%
# Heatmap of the train confusion matrix, normalized to percentages.
sns.heatmap(confusion_mat_train_three_layer_rnn_history/np.sum(confusion_mat_train_three_layer_rnn_history), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
# %%
class Five_Hidden_Layer_RNN(object):
    """
    Recurrent neural network with one recurrent tanh layer followed by four
    feed-forward ReLU layers, used to classify human activity from
    3-channel sensor sequences of length ``seq_len``.
    Encapsulates forward propagation, BPTT, the SGD/momentum update and
    training/metric bookkeeping.
    """
    def __init__(self,input_dim = 3,hidden_dim_1 = 128, hidden_dim_2 = 64,hidden_dim_3 = 32,hidden_dim_4 = 16 ,hidden_dim_5 = 8, seq_len = 150, learning_rate = 1e-1, mom_coeff = 0.85, batch_size = 32, output_class = 6):
        """
        Initialization of weights/biases (Xavier-uniform) and other
        configurable hyperparameters.
        """
        # Fixed seed for reproducible weight initialization.
        np.random.seed(150)
        self.input_dim = input_dim
        self.hidden_dim_1 = hidden_dim_1
        self.hidden_dim_2 = hidden_dim_2
        self.hidden_dim_3 = hidden_dim_3
        self.hidden_dim_4 = hidden_dim_4
        self.hidden_dim_5 = hidden_dim_5
        # Unfold case T = 150 :
        self.seq_len = seq_len
        self.output_class = output_class
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.mom_coeff = mom_coeff
        # Xavier uniform scaler :
        Xavier = lambda fan_in,fan_out : math.sqrt(6/(fan_in + fan_out))
        # Input -> recurrent hidden layer 1.
        lim_inp2hid = Xavier(self.input_dim,self.hidden_dim_1)
        self.W1 = np.random.uniform(-lim_inp2hid,lim_inp2hid,(self.input_dim,self.hidden_dim_1))
        self.B1 = np.random.uniform(-lim_inp2hid,lim_inp2hid,(1,self.hidden_dim_1))
        # Recurrent (hidden-to-hidden) weights of layer 1.
        lim_hid2hid = Xavier(self.hidden_dim_1,self.hidden_dim_1)
        self.W1_rec= np.random.uniform(-lim_hid2hid,lim_hid2hid,(self.hidden_dim_1,self.hidden_dim_1))
        # Feed-forward layers 2..5.
        lim_hid2hid2 = Xavier(self.hidden_dim_1,self.hidden_dim_2)
        self.W2 = np.random.uniform(-lim_hid2hid2,lim_hid2hid2,(self.hidden_dim_1,self.hidden_dim_2))
        self.B2 = np.random.uniform(-lim_hid2hid2,lim_hid2hid2,(1,self.hidden_dim_2))
        lim_hid2hid3 = Xavier(self.hidden_dim_2,self.hidden_dim_3)
        self.W3 = np.random.uniform(-lim_hid2hid3,lim_hid2hid3,(self.hidden_dim_2,self.hidden_dim_3))
        self.B3 = np.random.uniform(-lim_hid2hid3,lim_hid2hid3,(1,self.hidden_dim_3))
        lim_hid2hid4 = Xavier(self.hidden_dim_3,self.hidden_dim_4)
        self.W4 = np.random.uniform(-lim_hid2hid4,lim_hid2hid4,(self.hidden_dim_3,self.hidden_dim_4))
        self.B4 = np.random.uniform(-lim_hid2hid4,lim_hid2hid4,(1,self.hidden_dim_4))
        lim_hid2hid5 = Xavier(self.hidden_dim_4,self.hidden_dim_5)
        self.W5 = np.random.uniform(-lim_hid2hid5,lim_hid2hid5,(self.hidden_dim_4,self.hidden_dim_5))
        self.B5 = np.random.uniform(-lim_hid2hid5,lim_hid2hid5,(1,self.hidden_dim_5))
        # Final hidden layer -> output classes.
        lim_hid2out = Xavier(self.hidden_dim_5,self.output_class)
        self.W6 = np.random.uniform(-lim_hid2out,lim_hid2out,(self.hidden_dim_5,self.output_class))
        self.B6 = np.random.uniform(-lim_hid2out,lim_hid2out,(1,self.output_class))
        # To keep track loss and accuracy score :
        self.train_loss,self.test_loss,self.train_acc,self.test_acc = [],[],[],[]
        # Storing previous momentum updates :
        self.prev_updates = {'W1' : 0,
        'B1' : 0,
        'W1_rec' : 0,
        'W2' : 0,
        'B2' : 0,
        'W3' : 0,
        'W4' : 0,
        'B3' : 0,
        'B4' : 0,
        'W5' : 0,
        'W6' : 0,
        'B5' : 0,
        'B6' : 0}
    def forward(self,X) -> tuple:
        """
        Forward propagation of the RNN through time.
        __________________________________________________________
        Inputs:
        --- X is the batch, indexed as X[:, t] per timestep.
        __________________________________________________________
        Returns a tuple of per-timestep dicts:
        ------ 1) X_state : inputs across all time steps
        ------ 2) hidden_state_1 : recurrent tanh activations across time
        ------ 3-10) pre-activations / ReLU activations of MLP layers 2..5
        ------ 11) probs : softmax class probabilities per timestep
        __________________________________________________________
        """
        X_state = dict()
        hidden_state_1 = dict()
        hidden_state_mlp = dict()
        hidden_state_mlp_2 = dict()
        hidden_state_mlp_3 = dict()
        hidden_state_mlp_4 = dict()
        output_state = dict()
        probs = dict()
        mlp_linear = dict()
        mlp_linear_2 = dict()
        mlp_linear_3 = dict()
        mlp_linear_4 = dict()
        # Hidden state starts at zero each call; index -1 feeds timestep 0.
        self.h_prev_state = np.zeros((1,self.hidden_dim_1))
        hidden_state_1[-1] = np.copy(self.h_prev_state)
        # Loop over time T = 150 :
        for t in range(self.seq_len):
            # Selecting first record with 3 inputs, dimension = (batch_size,input_size)
            X_state[t] = X[:,t]
            # Recurrent hidden layer :
            hidden_state_1[t] = np.tanh(np.dot(X_state[t],self.W1) + np.dot(hidden_state_1[t-1],self.W1_rec) + self.B1)
            # Four stacked feed-forward ReLU layers on top of the recurrent state.
            mlp_linear[t] = np.dot(hidden_state_1[t],self.W2) + self.B2
            hidden_state_mlp[t] = activations.ReLU(mlp_linear[t])
            mlp_linear_2[t] = np.dot(hidden_state_mlp[t],self.W3) + self.B3
            hidden_state_mlp_2[t] = activations.ReLU(mlp_linear_2[t])
            mlp_linear_3[t] = np.dot(hidden_state_mlp_2[t],self.W4) + self.B4
            hidden_state_mlp_3[t] = activations.ReLU(mlp_linear_3[t])
            mlp_linear_4[t] = np.dot(hidden_state_mlp_3[t],self.W5) + self.B5
            hidden_state_mlp_4[t] = activations.ReLU(mlp_linear_4[t])
            output_state[t] = np.dot(hidden_state_mlp_4[t],self.W6) + self.B6
            # Per class probabilites :
            probs[t] = activations.softmax(output_state[t])
        return (X_state,hidden_state_1,mlp_linear,hidden_state_mlp,mlp_linear_2,hidden_state_mlp_2,mlp_linear_3,hidden_state_mlp_3,mlp_linear_4,hidden_state_mlp_4,probs)
    def BPTT(self,cache,Y):
        """
        Back propagation through time algorithm.
        Inputs:
        -- cache = (X_state,hidden_state_1,mlp_linear,hidden_state_mlp,mlp_linear_2,hidden_state_mlp_2,mlp_linear_3,hidden_state_mlp_3,mlp_linear_4,hidden_state_mlp_4,probs)
        -- Y = one-hot desired output, shape (batch, n_classes)
        Returns:
        -- Gradients w.r.t. all configurable elements, clipped to [-10, 10].
        """
        X_state,hidden_state_1,mlp_linear,hidden_state_mlp,mlp_linear_2,hidden_state_mlp_2,mlp_linear_3,hidden_state_mlp_3,mlp_linear_4,hidden_state_mlp_4,probs = cache
        # backward pass: compute gradients going backwards
        dW1, dW1_rec, dW2, dW3, dW4, dW5, dW6 = np.zeros_like(self.W1), np.zeros_like(self.W1_rec), np.zeros_like(self.W2),np.zeros_like(self.W3),np.zeros_like(self.W4),np.zeros_like(self.W5),np.zeros_like(self.W6)
        dB1, dB2,dB3,dB4,dB5,dB6 = np.zeros_like(self.B1), np.zeros_like(self.B2),np.zeros_like(self.B3),np.zeros_like(self.B4),np.zeros_like(self.B5),np.zeros_like(self.B6)
        # Gradient flowing into the recurrent state from the future timestep.
        dhnext = np.zeros_like(hidden_state_1[0])
        for t in reversed(range(1,self.seq_len)):
            # NOTE(review): dy is taken from the FINAL timestep's probs (index 149)
            # at every t — the LSTM class below does the same, but the 3-layer
            # class uses probs[t]; confirm which behavior is intended.
            dy = np.copy(probs[149])
            dy[np.arange(len(Y)),np.argmax(Y,1)] -= 1
            #dy = probs[0] - Y[0]
            # Output layer (W6 / B6).
            dW6 += np.dot(hidden_state_mlp_4[t].T,dy)
            dB6 += np.sum(dy,axis = 0, keepdims = True)
            # Back through the four ReLU MLP layers.
            dy1 = np.dot(dy,self.W6.T) * activations.ReLU_grad(mlp_linear_4[t])
            dW5 += np.dot(hidden_state_mlp_3[t].T,dy1)
            dB5 += np.sum(dy1,axis = 0, keepdims = True)
            dy2 = np.dot(dy1,self.W5.T) * activations.ReLU_grad(mlp_linear_3[t])
            dW4 += np.dot(hidden_state_mlp_2[t].T,dy2)
            dB4 += np.sum(dy2,axis = 0, keepdims = True)
            dy3 = np.dot(dy2,self.W4.T) * activations.ReLU_grad(mlp_linear_2[t])
            dW3 += np.dot(hidden_state_mlp[t].T,dy3)
            dB3 += np.sum(dy3,axis = 0, keepdims = True)
            dy4 = np.dot(dy3,self.W3.T) * activations.ReLU_grad(mlp_linear[t])
            dB2 += np.sum(dy4,axis = 0, keepdims = True)
            dW2 += np.dot(hidden_state_1[t].T,dy4)
            # Recurrent tanh layer: include gradient from the future (dhnext).
            dh = np.dot(dy4,self.W2.T) + dhnext
            # tanh'(a) = 1 - tanh(a)^2, via the cached activation.
            dhrec = (1 - (hidden_state_1[t] * hidden_state_1[t])) * dh
            dB1 += np.sum(dhrec,axis = 0, keepdims = True)
            dW1 += np.dot(X_state[t].T,dhrec)
            dW1_rec += np.dot(hidden_state_1[t-1].T,dhrec)
            dhnext = np.dot(dhrec,self.W1_rec.T)
        # Clip elementwise to mitigate exploding gradients.
        for grad in [dW1,dB1,dW1_rec,dW2,dB2,dW3,dB3,dW4,dB4,dW5,dB5,dW6,dB6]:
            np.clip(grad, -10, 10, out = grad)
        return [dW1,dB1,dW1_rec,dW2,dB2,dW3,dB3,dW4,dB4,dW5,dB5,dW6,dB6]
    def CategoricalCrossEntropy(self,labels,preds):
        """
        Computes mean cross entropy between one-hot labels and the
        model's predicted probabilities.
        """
        predictions = np.clip(preds, 1e-12, 1. - 1e-12)
        N = predictions.shape[0]
        return -np.sum(labels * np.log(predictions + 1e-9)) / N
    def step(self,grads,momentum = True):
        """
        Apply one momentum-SGD update using the gradient list from BPTT.
        NOTE(review): when momentum is False nothing is updated — the vanilla
        path below is commented out; confirm intended.
        """
        #for config_param,grad in zip([self.W1,self.B1,self.W1_rec,self.W2,self.B2,self.W3,self.B3],grads):
        #config_param -= self.learning_rate * grad
        if momentum:
            # delta = -lr * grad + mu * previous_delta  (heavy-ball momentum)
            delta_W1 = -self.learning_rate * grads[0] + self.mom_coeff * self.prev_updates['W1']
            delta_B1 = -self.learning_rate * grads[1] + self.mom_coeff * self.prev_updates['B1']
            delta_W1_rec = -self.learning_rate * grads[2] + self.mom_coeff * self.prev_updates['W1_rec']
            delta_W2 = -self.learning_rate * grads[3] + self.mom_coeff * self.prev_updates['W2']
            delta_B2 = -self.learning_rate * grads[4] + self.mom_coeff * self.prev_updates['B2']
            delta_W3 = -self.learning_rate * grads[5] + self.mom_coeff * self.prev_updates['W3']
            delta_B3 = -self.learning_rate * grads[6] + self.mom_coeff * self.prev_updates['B3']
            delta_W4 = -self.learning_rate * grads[7] + self.mom_coeff * self.prev_updates['W4']
            delta_B4 = -self.learning_rate * grads[8] + self.mom_coeff * self.prev_updates['B4']
            delta_W5 = -self.learning_rate * grads[9] + self.mom_coeff * self.prev_updates['W5']
            delta_B5 = -self.learning_rate * grads[10] + self.mom_coeff * self.prev_updates['B5']
            delta_W6 = -self.learning_rate * grads[11] + self.mom_coeff * self.prev_updates['W6']
            delta_B6 = -self.learning_rate * grads[12] + self.mom_coeff * self.prev_updates['B6']
            # Apply the deltas in place.
            self.W1 += delta_W1
            self.W1_rec += delta_W1_rec
            self.W2 += delta_W2
            self.B1 += delta_B1
            self.B2 += delta_B2
            self.W3 += delta_W3
            self.B3 += delta_B3
            self.W4 += delta_W4
            self.B4 += delta_B4
            self.W5 += delta_W5
            self.B5 += delta_B5
            self.W6 += delta_W6
            self.B6 += delta_B6
            # Remember this step's deltas for the next momentum update.
            self.prev_updates['W1'] = delta_W1
            self.prev_updates['W1_rec'] = delta_W1_rec
            self.prev_updates['W2'] = delta_W2
            self.prev_updates['B1'] = delta_B1
            self.prev_updates['B2'] = delta_B2
            self.prev_updates['W3'] = delta_W3
            self.prev_updates['B3'] = delta_B3
            self.prev_updates['W4'] = delta_W4
            self.prev_updates['B4'] = delta_B4
            self.prev_updates['W5'] = delta_W5
            self.prev_updates['B5'] = delta_B5
            self.prev_updates['W6'] = delta_W6
            self.prev_updates['B6'] = delta_B6
            # Multiplicative learning-rate decay per call.
            self.learning_rate *= 0.9999
    def fit(self,X,Y,X_val,y_val,epochs = 50 ,verbose = True, crossVal = False):
        """
        Given the training dataset, its labels and number of epochs,
        fit the model with shuffled mini-batch SGD and record per-epoch
        train/validation loss and accuracy.
        """
        for epoch in range(epochs):
            print(f'Epoch : {epoch + 1}')
            # NOTE(review): permutation size is hard-coded to 3000 — breaks if
            # the training set has a different number of rows; confirm.
            perm = np.random.permutation(3000)
            for i in range(round(X.shape[0]/self.batch_size)):
                batch_start = i * self.batch_size
                batch_finish = (i+1) * self.batch_size
                index = perm[batch_start:batch_finish]
                X_feed = X[index]
                y_feed = Y[index]
                # Forward + BPTT + parameter update.
                cache_train = self.forward(X_feed)
                grads = self.BPTT(cache_train,y_feed)
                self.step(grads)
                if crossVal:
                    # NOTE(review): val_X/val_Y are undefined names and no
                    # cross_validation method exists — this branch would raise.
                    stop = self.cross_validation(X,val_X,Y,val_Y,threshold = 5)
                    if stop:
                        break
            # Training metrics: loss on the last mini-batch at the final timestep
            # (cache_train[10] is the probs dict), accuracy on the full set.
            cross_loss_train = self.CategoricalCrossEntropy(y_feed,cache_train[10][149])
            predictions_train = self.predict(X)
            acc_train = metrics.accuracy(np.argmax(Y,1),predictions_train)
            # Validation metrics at the final timestep.
            _,__,___,____,_____,______,_______,________,__________,___________, probs_test = self.forward(X_val)
            cross_loss_val = self.CategoricalCrossEntropy(y_val,probs_test[149])
            predictions_val = np.argmax(probs_test[149],1)
            acc_val = metrics.accuracy(np.argmax(y_val,1),predictions_val)
            if verbose:
                print(f"[{epoch + 1}/{epochs}] ------> Training : Accuracy : {acc_train}")
                print(f"[{epoch + 1}/{epochs}] ------> Training : Loss : {cross_loss_train}")
                print('______________________________________________________________________________________\n')
                print(f"[{epoch + 1}/{epochs}] ------> Testing : Accuracy : {acc_val}")
                print(f"[{epoch + 1}/{epochs}] ------> Testing : Loss : {cross_loss_val}")
                print('______________________________________________________________________________________\n')
            self.train_loss.append(cross_loss_train)
            self.test_loss.append(cross_loss_val)
            self.train_acc.append(acc_train)
            self.test_acc.append(acc_val)
    def predict(self,X):
        # Class-index predictions from the final-timestep probabilities.
        _,__,___,____,_____,______,_______,________,__________,___________, probs = self.forward(X)
        return np.argmax(probs[149],axis=1)
    def history(self):
        # Recorded training/validation curves, keyed by metric name.
        return {'TrainLoss' : self.train_loss,
        'TrainAcc' : self.train_acc,
        'TestLoss' : self.test_loss,
        'TestAcc' : self.test_acc}
# %%
# Train the 5-hidden-layer RNN (momentum disabled).
five_hidden_layer_rnn = Five_Hidden_Layer_RNN(hidden_dim_1 = 128, hidden_dim_2 = 64,hidden_dim_3 = 32,hidden_dim_4 = 16 ,hidden_dim_5 = 8, learning_rate = 1e-4, mom_coeff = 0.0)
# %%
five_hidden_layer_rnn.fit(X_train,y_train,X_test,y_test,epochs = 35)
# %%
# Loss curves (train vs. test) over epochs.
five_hidden_layer_rnn_history = five_hidden_layer_rnn.history()
plt.figure()
plt.plot(five_hidden_layer_rnn_history['TestLoss'],'-o')
plt.plot(five_hidden_layer_rnn_history['TrainLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Categorical Cross Entropy over epochs')
plt.legend(['Test Loss','Train Loss'])
plt.show()
# %%
# Accuracy curves (train vs. test) over epochs.
plt.figure()
plt.plot(five_hidden_layer_rnn_history['TestAcc'],'-o')
plt.plot(five_hidden_layer_rnn_history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Accuracy over epochs')
plt.legend(['Test Acc','Train Acc'])
plt.show()
# %%
# Compare training accuracy with models from earlier cells.
plt.figure()
plt.plot(five_hidden_layer_rnn_history['TrainAcc'],'-o')
plt.plot(three_layer_rnn_history['TrainAcc'],'-o')
plt.plot(multilayer_rnn_history['TrainAcc'],'-o')
plt.plot(history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Accuracy over epochs')
plt.legend(['Five hidden layer RNN','3 hidden layer RNN','Multi Layer RNN','Vanilla RNN'])
plt.show()
# %%
# Same comparison on test accuracy.
plt.figure()
plt.plot(five_hidden_layer_rnn_history['TestAcc'],'-o')
plt.plot(three_layer_rnn_history['TestAcc'],'-o')
plt.plot(multilayer_rnn_history['TestAcc'],'-o')
plt.plot(history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Accuracy over epochs')
plt.legend(['Five hidden layer RNN','3 hidden layer RNN','Multi Layer RNN','Vanilla RNN'])
plt.show()
# %%
# Confusion matrices labelled with activity names.
# NOTE(review): assigning .columns/.index assumes metrics.confusion_matrix
# returns a pandas DataFrame — confirm against the metrics module.
train_preds_five_hidden_layer_rnn = five_hidden_layer_rnn.predict(X_train)
test_preds_five_hidden_layer_rnn = five_hidden_layer_rnn.predict(X_test)
confusion_mat_train_five_hidden_layer_rnn = metrics.confusion_matrix(np.argmax(y_train,1),train_preds_five_hidden_layer_rnn)
confusion_mat_test_five_hidden_layer_rnn = metrics.confusion_matrix(np.argmax(y_test,1),test_preds_five_hidden_layer_rnn)
body_movements = ['downstairs','jogging','sitting','standing','upstairs','walking']
confusion_mat_train_five_hidden_layer_rnn.columns = body_movements
confusion_mat_train_five_hidden_layer_rnn.index = body_movements
confusion_mat_test_five_hidden_layer_rnn.columns = body_movements
confusion_mat_test_five_hidden_layer_rnn.index = body_movements
print(confusion_mat_test_five_hidden_layer_rnn)
# %%
# Percentage heatmaps of the test and train confusion matrices.
sns.heatmap(confusion_mat_test_five_hidden_layer_rnn/np.sum(confusion_mat_test_five_hidden_layer_rnn), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
# %%
sns.heatmap(confusion_mat_train_five_hidden_layer_rnn/np.sum(confusion_mat_train_five_hidden_layer_rnn), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
# %% [markdown]
# LSTM
# %%
def sigmoid(x):
    """Elementwise logistic sigmoid: 1 / (1 + e^(-x))."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def dsigmoid(y):
    """Sigmoid derivative expressed via the sigmoid output y: y * (1 - y)."""
    return (1 - y) * y
def tanh(x):
    """Elementwise hyperbolic tangent (thin wrapper over numpy)."""
    result = np.tanh(x)
    return result
def dtanh(y):
    """tanh derivative expressed via the tanh output y: 1 - y^2."""
    y_squared = y * y
    return 1 - y_squared
# %%
class LSTM(object):
"""
Long-Short Term Memory Recurrent neural network, encapsulates all necessary logic for training, then built the hyperparameters and architecture of the network.
"""
def __init__(self,input_dim = 3,hidden_dim = 100,output_class = 6,seq_len = 150,batch_size = 30,learning_rate = 1e-1,mom_coeff = 0.85):
"""
Initialization of weights/biases and other configurable parameters.
"""
np.random.seed(150)
self.input_dim = input_dim
self.hidden_dim = hidden_dim
# Unfold case T = 150 :
self.seq_len = seq_len
self.output_class = output_class
self.learning_rate = learning_rate
self.batch_size = batch_size
self.mom_coeff = mom_coeff
self.input_stack_dim = self.input_dim + self.hidden_dim
# Xavier uniform scaler :
Xavier = lambda fan_in,fan_out : math.sqrt(6/(fan_in + fan_out))
lim1 = Xavier(self.input_dim,self.hidden_dim)
self.W_f = np.random.uniform(-lim1,lim1,(self.input_stack_dim,self.hidden_dim))
self.B_f = np.random.uniform(-lim1,lim1,(1,self.hidden_dim))
self.W_i = np.random.uniform(-lim1,lim1,(self.input_stack_dim,self.hidden_dim))
self.B_i = np.random.uniform(-lim1,lim1,(1,self.hidden_dim))
self.W_c = np.random.uniform(-lim1,lim1,(self.input_stack_dim,self.hidden_dim))
self.B_c = np.random.uniform(-lim1,lim1,(1,self.hidden_dim))
self.W_o = np.random.uniform(-lim1,lim1,(self.input_stack_dim,self.hidden_dim))
self.B_o = np.random.uniform(-lim1,lim1,(1,self.hidden_dim))
lim2 = Xavier(self.hidden_dim,self.output_class)
self.W = np.random.uniform(-lim2,lim2,(self.hidden_dim,self.output_class))
self.B = np.random.uniform(-lim2,lim2,(1,self.output_class))
# To keep track loss and accuracy score :
self.train_loss,self.test_loss,self.train_acc,self.test_acc = [],[],[],[]
# To keep previous updates in momentum :
self.previous_updates = [0] * 10
# For AdaGrad:
self.cache = [0] * 10
self.cache_rmsprop = [0] * 10
self.m = [0] * 10
self.v = [0] * 10
self.t = 1
def cell_forward(self,X,h_prev,C_prev):
"""
Takes input, previous hidden state and previous cell state, compute:
--- Forget gate + Input gate + New candidate input + New cell state +
output gate + hidden state. Then, classify by softmax.
"""
#print(X.shape,h_prev.shape)
# Stacking previous hidden state vector with inputs:
stack = np.column_stack([X,h_prev])
# Forget gate:
forget_gate = activations.sigmoid(np.dot(stack,self.W_f) + self.B_f)
# İnput gate:
input_gate = activations.sigmoid(np.dot(stack,self.W_i) + self.B_i)
# New candidate:
cell_bar = np.tanh(np.dot(stack,self.W_c) + self.B_c)
# New Cell state:
cell_state = forget_gate * C_prev + input_gate * cell_bar
# Output fate:
output_gate = activations.sigmoid(np.dot(stack,self.W_o) + self.B_o)
# Hidden state:
hidden_state = output_gate * np.tanh(cell_state)
# Classifiers (Softmax) :
dense = np.dot(hidden_state,self.W) + self.B
probs = activations.softmax(dense)
return (stack,forget_gate,input_gate,cell_bar,cell_state,output_gate,hidden_state,dense,probs)
def forward(self,X,h_prev,C_prev):
x_s,z_s,f_s,i_s = {},{},{},{}
C_bar_s,C_s,o_s,h_s = {},{},{},{}
v_s,y_s = {},{}
h_s[-1] = np.copy(h_prev)
C_s[-1] = np.copy(C_prev)
for t in range(self.seq_len):
x_s[t] = X[:,t,:]
z_s[t], f_s[t], i_s[t], C_bar_s[t], C_s[t], o_s[t], h_s[t],v_s[t], y_s[t] = self.cell_forward(x_s[t],h_s[t-1],C_s[t-1])
return (z_s, f_s, i_s, C_bar_s, C_s, o_s, h_s,v_s, y_s)
def BPTT(self,outs,Y):
z_s, f_s, i_s, C_bar_s, C_s, o_s, h_s,v_s, y_s = outs
dW_f, dW_i,dW_c, dW_o,dW = np.zeros_like(self.W_f), np.zeros_like(self.W_i), np.zeros_like(self.W_c),np.zeros_like(self.W_o),np.zeros_like(self.W)
dB_f, dB_i,dB_c,dB_o,dB = np.zeros_like(self.B_f), np.zeros_like(self.B_i),np.zeros_like(self.B_c),np.zeros_like(self.B_o),np.zeros_like(self.B)
dh_next = np.zeros_like(h_s[0])
dC_next = np.zeros_like(C_s[0])
# w.r.t. softmax input
ddense = np.copy(y_s[149])
ddense[np.arange(len(Y)),np.argmax(Y,1)] -= 1
#ddense[np.argmax(Y,1)] -=1
#ddense = y_s[149] - Y
# Softmax classifier's :
dW = np.dot(h_s[149].T,ddense)
dB = np.sum(ddense,axis = 0, keepdims = True)
# Backprop through time:
for t in reversed(range(1,self.seq_len)):
# Just equating more meaningful names
stack,forget_gate,input_gate,cell_bar,cell_state,output_gate,hidden_state,dense,probs = z_s[t], f_s[t], i_s[t], C_bar_s[t], C_s[t], o_s[t], h_s[t],v_s[t], y_s[t]
C_prev = C_s[t-1]
# w.r.t. softmax input
#ddense = np.copy(probs)
#ddense[np.arange(len(Y)),np.argmax(Y,1)] -= 1
#ddense[np.arange(len(Y)),np.argmax(Y,1)] -=1
# Softmax classifier's :
#dW += np.dot(hidden_state.T,ddense)
#dB += np.sum(ddense,axis = 0, keepdims = True)
# Output gate :
dh = np.dot(ddense,self.W.T) + dh_next
do = dh * np.tanh(cell_state)
do = do * dsigmoid(output_gate)
dW_o += np.dot(stack.T,do)
dB_o += np.sum(do,axis = 0, keepdims = True)
# Cell state:
dC = np.copy(dC_next)
dC += dh * output_gate * activations.dtanh(cell_state)
dC_bar = dC * input_gate
dC_bar = dC_bar * dtanh(cell_bar)
dW_c += np.dot(stack.T,dC_bar)
dB_c += np.sum(dC_bar,axis = 0, keepdims = True)
# Input gate:
di = dC * cell_bar
di = dsigmoid(input_gate) * di
dW_i += np.dot(stack.T,di)
dB_i += np.sum(di,axis = 0,keepdims = True)
# Forget gate:
df = dC * C_prev
df = df * dsigmoid(forget_gate)
dW_f += np.dot(stack.T,df)
dB_f += np.sum(df,axis = 0, keepdims = True)
dz = np.dot(df,self.W_f.T) + np.dot(di,self.W_i.T) + np.dot(dC_bar,self.W_c.T) + np.dot(do,self.W_o.T)
dh_next = dz[:,-self.hidden_dim:]
dC_next = forget_gate * dC
# List of gradients :
grads = [dW,dB,dW_o,dB_o,dW_c,dB_c,dW_i,dB_i,dW_f,dB_f]
# Clipping gradients anyway
for grad in grads:
np.clip(grad, -15, 15, out = grad)
return h_s[self.seq_len - 1],C_s[self.seq_len -1 ],grads
def fit(self,X,Y,X_val,y_val,epochs = 50 ,optimizer = 'SGD',verbose = True, crossVal = False):
"""
Given the traning dataset,their labels and number of epochs
fitting the model, and measure the performance
by validating training dataset.
"""
for epoch in range(epochs):
print(f'Epoch : {epoch + 1}')
perm = np.random.permutation(3000)
h_prev,C_prev = np.zeros((self.batch_size,self.hidden_dim)),np.zeros((self.batch_size,self.hidden_dim))
for i in range(round(X.shape[0]/self.batch_size) - 1):
batch_start = i * self.batch_size
batch_finish = (i+1) * self.batch_size
index = perm[batch_start:batch_finish]
# Feeding random indexes:
X_feed = X[index]
y_feed = Y[index]
# Forward + BPTT + SGD:
cache_train = self.forward(X_feed,h_prev,C_prev)
h,c,grads = self.BPTT(cache_train,y_feed)
if optimizer == 'SGD':
self.SGD(grads)
elif optimizer == 'AdaGrad' :
self.AdaGrad(grads)
elif optimizer == 'RMSprop':
self.RMSprop(grads)
elif optimizer == 'VanillaAdam':
self.VanillaAdam(grads)
else:
self.Adam(grads)
# Hidden state -------> Previous hidden state
# Cell state ---------> Previous cell state
h_prev,C_prev = h,c
# Training metrics calculations:
cross_loss_train = self.CategoricalCrossEntropy(y_feed,cache_train[8][149])
predictions_train = self.predict(X)
acc_train = metrics.accuracy(np.argmax(Y,1),predictions_train)
# Validation metrics calculations:
test_prevs = np.zeros((X_val.shape[0],self.hidden_dim))
_,__,___,____,_____,______,_______,________,probs_test = self.forward(X_val,test_prevs,test_prevs)
cross_loss_val = self.CategoricalCrossEntropy(y_val,probs_test[149])
predictions_val = np.argmax(probs_test[149],1)
acc_val = metrics.accuracy(np.argmax(y_val,1),predictions_val)
if verbose:
print(f"[{epoch + 1}/{epochs}] ------> Training : Accuracy : {acc_train}")
print(f"[{epoch + 1}/{epochs}] ------> Training : Loss : {cross_loss_train}")
print('______________________________________________________________________________________\n')
print(f"[{epoch + 1}/{epochs}] ------> Testing : Accuracy : {acc_val}")
print(f"[{epoch + 1}/{epochs}] ------> Testing : Loss : {cross_loss_val}")
print('______________________________________________________________________________________\n')
self.train_loss.append(cross_loss_train)
self.test_loss.append(cross_loss_val)
self.train_acc.append(acc_train)
self.test_acc.append(acc_val)
def params(self):
"""
Return all weights/biases in sequential order starting from end in list form.
"""
return [self.W,self.B,self.W_o,self.B_o,self.W_c,self.B_c,self.W_i,self.B_i,self.W_f,self.B_f]
def SGD(self,grads):
"""
Stochastic gradient descent with momentum on mini-batches.
"""
prevs = []
for param,grad,prev_update in zip(self.params(),grads,self.previous_updates):
delta = self.learning_rate * grad - self.mom_coeff * prev_update
param -= delta
prevs.append(delta)
self.previous_updates = prevs
self.learning_rate *= 0.99999
def AdaGrad(self,grads):
"""
AdaGrad adaptive optimization algorithm.
"""
i = 0
for param,grad in zip(self.params(),grads):
self.cache[i] += grad **2
param += -self.learning_rate * grad / (np.sqrt(self.cache[i]) + 1e-6)
i += 1
def RMSprop(self,grads,decay_rate = 0.9):
"""
RMSprop adaptive optimization algorithm
"""
i = 0
for param,grad in zip(self.params(),grads):
self.cache_rmsprop[i] = decay_rate * self.cache_rmsprop[i] + (1-decay_rate) * grad **2
param += - self.learning_rate * grad / (np.sqrt(self.cache_rmsprop[i])+ 1e-6)
i += 1
def VanillaAdam(self,grads,beta1 = 0.9,beta2 = 0.999):
"""
Adam optimizer, but bias correction is not implemented
"""
i = 0
for param,grad in zip(self.params(),grads):
self.m[i] = beta1 * self.m[i] + (1-beta1) * grad
self.v[i] = beta2 * self.v[i] + (1-beta2) * grad **2
param += -self.learning_rate * self.m[i] / (np.sqrt(self.v[i]) + 1e-8)
i += 1
def Adam(self,grads,beta1 = 0.9,beta2 = 0.999):
"""
Adam optimizer, bias correction is implemented.
"""
i = 0
for param,grad in zip(self.params(),grads):
self.m[i] = beta1 * self.m[i] + (1-beta1) * grad
self.v[i] = beta2 * self.v[i] + (1-beta2) * grad **2
m_corrected = self.m[i] / (1-beta1**self.t)
v_corrected = self.v[i] / (1-beta2**self.t)
param += -self.learning_rate * m_corrected / (np.sqrt(v_corrected) + 1e-8)
i += 1
self.t +=1
def CategoricalCrossEntropy(self,labels,preds):
"""
Computes cross entropy between labels and model's predictions
"""
predictions = np.clip(preds, 1e-12, 1. - 1e-12)
N = predictions.shape[0]
return -np.sum(labels * np.log(predictions + 1e-9)) / N
def predict(self,X):
    """
    Predicted class indices (not one-hot) for a batch of sequences.

    Runs a full forward pass from zero hidden/cell states and takes the
    argmax of the softmax output at the final timestep (index 149).
    """
    # Fresh zero states for every prediction batch.
    zero_states = np.zeros((X.shape[0], self.hidden_dim))
    # forward() returns a tuple whose last element is the per-timestep
    # probability dict; only that is needed here.
    *_, probs = self.forward(X, zero_states, zero_states)
    return np.argmax(probs[149], axis=1)
def history(self):
    """Per-epoch metric traces recorded during fit(), as a dict."""
    return dict(
        TrainLoss=self.train_loss,
        TrainAcc=self.train_acc,
        TestLoss=self.test_loss,
        TestAcc=self.test_acc,
    )
# %%
# Baseline single-layer LSTM trained with plain SGD (momentum disabled).
lstm = LSTM(learning_rate = 5e-4,mom_coeff = 0.0,batch_size = 32,hidden_dim=128)
# %%
lstm.fit(X_train,y_train,X_test,y_test,epochs = 15,optimizer='SGD')
# %%
lstm_history = lstm.history()
# %%
# Confusion matrices for the SGD-trained LSTM on both splits.
train_preds_lstm = lstm.predict(X_train)
test_preds_lstm = lstm.predict(X_test)
confusion_mat_train_lstm = metrics.confusion_matrix(np.argmax(y_train,1),train_preds_lstm)
confusion_mat_test_lstm = metrics.confusion_matrix(np.argmax(y_test,1),test_preds_lstm)
# Human-readable names for the six activity classes.
body_movements = ['downstairs','jogging','sitting','standing','upstairs','walking']
# NOTE(review): assigning .columns/.index implies metrics.confusion_matrix
# returns a pandas-DataFrame-like object (a plain ndarray has no such
# attributes) — confirm in the project's metrics module.
confusion_mat_train_lstm.columns = body_movements
confusion_mat_train_lstm.index = body_movements
confusion_mat_test_lstm.columns = body_movements
confusion_mat_test_lstm.index = body_movements
# Heatmaps normalised by the grand total (cells shown as percentages).
sns.heatmap(confusion_mat_train_lstm/np.sum(confusion_mat_train_lstm), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
sns.heatmap(confusion_mat_test_lstm/np.sum(confusion_mat_test_lstm), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
# %%
# Same architecture trained with each of the remaining optimizers.
lstm2 = LSTM(learning_rate = 2e-3,mom_coeff = 0.0,batch_size = 32,hidden_dim=128)
lstm2.fit(X_train,y_train,X_test,y_test,epochs = 15,optimizer='RMSprop')
# %%
lstm2_history = lstm2.history()
# %%
lstm3 = LSTM(learning_rate = 3e-3,mom_coeff = 0.0,batch_size = 32,hidden_dim=128)
lstm3.fit(X_train,y_train,X_test,y_test,epochs = 15,optimizer='Adam')
# %%
lstm4 = LSTM(learning_rate = 1e-3,mom_coeff = 0.0,batch_size = 32,hidden_dim=128)
lstm4.fit(X_train,y_train,X_test,y_test,epochs = 15,optimizer='AdaGrad')
# %%
lstm5 = LSTM(learning_rate = 1e-3,mom_coeff = 0.0,batch_size = 32,hidden_dim=128)
lstm5.fit(X_train,y_train,X_test,y_test,epochs = 15,optimizer='VanillaAdam')
# %%
# Optimizer comparison: accuracy/loss curves for all five runs.
# NOTE(review): the y-axis label reads 'Loss' even on the accuracy plots.
lstm3_history = lstm3.history()
lstm4_history = lstm4.history()
lstm5_history = lstm5.history()
plt.figure()
plt.plot(lstm_history['TrainAcc'],'-o')
plt.plot(lstm2_history['TrainAcc'],'-o')
plt.plot(lstm3_history['TrainAcc'],'-o')
plt.plot(lstm4_history['TrainAcc'],'-o')
plt.plot(lstm5_history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Accuracy over epochs')
plt.legend(['SGD','RMSprop','Adam','AdaGrad','Vanilla Adam'])
plt.show()
plt.figure()
plt.plot(lstm_history['TestAcc'],'-o')
plt.plot(lstm2_history['TestAcc'],'-o')
plt.plot(lstm3_history['TestAcc'],'-o')
plt.plot(lstm4_history['TestAcc'],'-o')
plt.plot(lstm5_history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Accuracy over epochs')
plt.legend(['SGD','RMSprop','Adam','AdaGrad','Vanilla Adam'])
plt.show()
plt.figure()
plt.plot(lstm_history['TrainLoss'],'-o')
plt.plot(lstm2_history['TrainLoss'],'-o')
plt.plot(lstm3_history['TrainLoss'],'-o')
plt.plot(lstm4_history['TrainLoss'],'-o')
plt.plot(lstm5_history['TrainLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Loss over epochs')
plt.legend(['SGD','RMSprop','Adam','AdaGrad','Vanilla Adam'])
plt.show()
plt.figure()
plt.plot(lstm_history['TestLoss'],'-o')
plt.plot(lstm2_history['TestLoss'],'-o')
plt.plot(lstm3_history['TestLoss'],'-o')
plt.plot(lstm4_history['TestLoss'],'-o')
plt.plot(lstm5_history['TestLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.legend(['SGD','RMSprop','Adam','AdaGrad','Vanilla Adam'])
plt.show()
# %%
# Best RNN vs best LSTM comparison.
# NOTE(review): three_layer_rnn_v2 must be defined earlier in the file
# (not visible in this chunk) — verify it exists before this cell runs.
three_layer_rnn_v2_history = three_layer_rnn_v2.history()
plt.figure()
plt.plot(three_layer_rnn_v2_history['TrainAcc'],'-o')
plt.plot(lstm_history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Accuracy over epochs')
plt.legend(['Best RNN','Best LSTM'])
plt.show()
plt.figure()
plt.plot(three_layer_rnn_v2_history['TestAcc'],'-o')
plt.plot(lstm_history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Accuracy over epochs')
plt.legend(['Best RNN','Best LSTM'])
plt.show()
plt.figure()
plt.plot(three_layer_rnn_v2_history['TrainLoss'],'-o')
plt.plot(lstm_history['TrainLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Loss over epochs')
plt.legend(['Best RNN','Best LSTM'])
plt.show()
plt.figure()
plt.plot(three_layer_rnn_v2_history['TestLoss'],'-o')
plt.plot(lstm_history['TestLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.legend(['Best RNN','Best LSTM'])
plt.show()
# %%
# Confusion matrices for the Adam-trained LSTM (lstm3); the earlier
# variables for the SGD run are intentionally overwritten here.
train_preds_lstm = lstm3.predict(X_train)
test_preds_lstm = lstm3.predict(X_test)
confusion_mat_train_lstm = metrics.confusion_matrix(np.argmax(y_train,1),train_preds_lstm)
confusion_mat_test_lstm = metrics.confusion_matrix(np.argmax(y_test,1),test_preds_lstm)
body_movements = ['downstairs','jogging','sitting','standing','upstairs','walking']
confusion_mat_train_lstm.columns = body_movements
confusion_mat_train_lstm.index = body_movements
confusion_mat_test_lstm.columns = body_movements
confusion_mat_test_lstm.index = body_movements
sns.heatmap(confusion_mat_train_lstm/np.sum(confusion_mat_train_lstm), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
sns.heatmap(confusion_mat_test_lstm/np.sum(confusion_mat_test_lstm), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
# %%
# %%
class Multi_Layer_LSTM(object):
    """
    Long-Short Term Memory Recurrent neural network, encapsulates all necessary logic for training, then built the hyperparameters and architecture of the network.

    "Multi layer" here means: one LSTM layer (hidden_dim_1) followed by a
    fully connected ReLU layer (hidden_dim_2) and a softmax classifier.
    """
    def __init__(self,input_dim = 3,hidden_dim_1 = 128,hidden_dim_2 =64,output_class = 6,seq_len = 150,batch_size = 30,learning_rate = 1e-1,mom_coeff = 0.85):
        """
        Initialization of weights/biases and other configurable parameters.
        """
        np.random.seed(150)
        self.input_dim = input_dim
        self.hidden_dim_1 = hidden_dim_1
        self.hidden_dim_2 = hidden_dim_2
        # Unfold case T = 150 :
        self.seq_len = seq_len
        self.output_class = output_class
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.mom_coeff = mom_coeff
        # Gate weights act on [x_t, h_{t-1}] stacked column-wise.
        self.input_stack_dim = self.input_dim + self.hidden_dim_1
        # Xavier uniform scaler :
        Xavier = lambda fan_in,fan_out : math.sqrt(6/(fan_in + fan_out))
        lim1 = Xavier(self.input_dim,self.hidden_dim_1)
        # Forget / input / candidate / output gate parameters.
        self.W_f = np.random.uniform(-lim1,lim1,(self.input_stack_dim,self.hidden_dim_1))
        self.B_f = np.random.uniform(-lim1,lim1,(1,self.hidden_dim_1))
        self.W_i = np.random.uniform(-lim1,lim1,(self.input_stack_dim,self.hidden_dim_1))
        self.B_i = np.random.uniform(-lim1,lim1,(1,self.hidden_dim_1))
        self.W_c = np.random.uniform(-lim1,lim1,(self.input_stack_dim,self.hidden_dim_1))
        self.B_c = np.random.uniform(-lim1,lim1,(1,self.hidden_dim_1))
        self.W_o = np.random.uniform(-lim1,lim1,(self.input_stack_dim,self.hidden_dim_1))
        self.B_o = np.random.uniform(-lim1,lim1,(1,self.hidden_dim_1))
        # Hidden fully connected layer between LSTM output and classifier.
        lim2 = Xavier(self.hidden_dim_1,self.hidden_dim_2)
        self.W_hid = np.random.uniform(-lim2,lim2,(self.hidden_dim_1,self.hidden_dim_2))
        self.B_hid = np.random.uniform(-lim2,lim2,(1,self.hidden_dim_2))
        # Softmax classifier parameters.
        lim3 = Xavier(self.hidden_dim_2,self.output_class)
        self.W = np.random.uniform(-lim3,lim3,(self.hidden_dim_2,self.output_class))
        self.B = np.random.uniform(-lim3,lim3,(1,self.output_class))
        # To keep track loss and accuracy score :
        self.train_loss,self.test_loss,self.train_acc,self.test_acc = [],[],[],[]
        # To keep previous updates in momentum :
        # NOTE(review): params() returns 12 tensors while these buffers hold
        # 13 slots; zip()/the manual index only ever touch the first 12, so
        # the extra slot is inert — harmless but worth tightening.
        self.previous_updates = [0] * 13
        # For AdaGrad:
        self.cache = [0] * 13
        self.cache_rmsprop = [0] * 13
        # For Adam (first/second moment estimates and timestep counter):
        self.m = [0] * 13
        self.v = [0] * 13
        self.t = 1
    def cell_forward(self,X,h_prev,C_prev):
        """
        Takes input, previous hidden state and previous cell state, compute:
        --- Forget gate + Input gate + New candidate input + New cell state +
        output gate + hidden state. Then, classify by softmax.

        Returns an 11-tuple of every intermediate needed later by BPTT.
        """
        #print(X.shape,h_prev.shape)
        # Stacking previous hidden state vector with inputs:
        stack = np.column_stack([X,h_prev])
        # Forget gate:
        forget_gate = activations.sigmoid(np.dot(stack,self.W_f) + self.B_f)
        # Input gate:
        input_gate = activations.sigmoid(np.dot(stack,self.W_i) + self.B_i)
        # New candidate:
        cell_bar = np.tanh(np.dot(stack,self.W_c) + self.B_c)
        # New Cell state:
        cell_state = forget_gate * C_prev + input_gate * cell_bar
        # Output gate:
        output_gate = activations.sigmoid(np.dot(stack,self.W_o) + self.B_o)
        # Hidden state:
        hidden_state = output_gate * np.tanh(cell_state)
        # Classifiers (Softmax) :
        # Extra fully connected ReLU layer before the softmax — this is the
        # "multi layer" part of the architecture.
        dense_hid = np.dot(hidden_state,self.W_hid) + self.B_hid
        act = activations.ReLU(dense_hid)
        dense = np.dot(act,self.W) + self.B
        probs = activations.softmax(dense)
        return (stack,forget_gate,input_gate,cell_bar,cell_state,output_gate,hidden_state,dense,probs,dense_hid,act)
    def forward(self,X,h_prev,C_prev):
        # Per-timestep caches keyed by t; index -1 holds the initial states.
        x_s,z_s,f_s,i_s = {},{},{},{}
        C_bar_s,C_s,o_s,h_s = {},{},{},{}
        v_s,y_s,v_1s,y_1s = {},{},{},{}
        h_s[-1] = np.copy(h_prev)
        C_s[-1] = np.copy(C_prev)
        # Unroll the cell over the full sequence length.
        for t in range(self.seq_len):
            x_s[t] = X[:,t,:]
            z_s[t], f_s[t], i_s[t], C_bar_s[t], C_s[t], o_s[t], h_s[t],v_s[t], y_s[t],v_1s[t],y_1s[t] = self.cell_forward(x_s[t],h_s[t-1],C_s[t-1])
        return (z_s, f_s, i_s, C_bar_s, C_s, o_s, h_s,v_s, y_s,v_1s,y_1s)
    def BPTT(self,outs,Y):
        # Backpropagation through time over the cached forward pass `outs`.
        z_s, f_s, i_s, C_bar_s, C_s, o_s, h_s,v_s, y_s,v_1s,y_1s = outs
        dW_f, dW_i,dW_c, dW_o,dW,dW_hid = np.zeros_like(self.W_f), np.zeros_like(self.W_i), np.zeros_like(self.W_c),np.zeros_like(self.W_o),np.zeros_like(self.W),np.zeros_like(self.W_hid)
        dB_f, dB_i,dB_c,dB_o,dB,dB_hid = np.zeros_like(self.B_f), np.zeros_like(self.B_i),np.zeros_like(self.B_c),np.zeros_like(self.B_o),np.zeros_like(self.B),np.zeros_like(self.B_hid)
        dh_next = np.zeros_like(h_s[0])
        dC_next = np.zeros_like(C_s[0])
        # w.r.t. softmax input
        # Loss gradient at the final timestep (149 = seq_len - 1): softmax
        # + cross-entropy gives probs - one_hot(Y).
        ddense = np.copy(y_s[149])
        ddense[np.arange(len(Y)),np.argmax(Y,1)] -= 1
        #ddense[np.argmax(Y,1)] -=1
        #ddense = y_s[149] - Y
        # Softmax classifier's :
        dW = np.dot(v_1s[149].T,ddense)
        dB = np.sum(ddense,axis = 0, keepdims = True)
        # Hidden FC layer gradient (through the ReLU at the last timestep).
        ddense_hid = np.dot(ddense,self.W.T) * activations.dReLU(v_1s[149])
        dW_hid = np.dot(h_s[149].T,ddense_hid)
        dB_hid = np.sum(ddense_hid,axis = 0, keepdims = True)
        # Backprop through time:
        # NOTE(review): the loop stops at t=1 (t=0 is excluded), and the same
        # final-timestep ddense_hid is re-injected at every step — confirm
        # this matches the intended loss (classification at t=149 only).
        for t in reversed(range(1,self.seq_len)):
            # Just equating more meaningful names
            stack,forget_gate,input_gate,cell_bar,cell_state,output_gate,hidden_state,dense,probs = z_s[t], f_s[t], i_s[t], C_bar_s[t], C_s[t], o_s[t], h_s[t],v_s[t], y_s[t]
            C_prev = C_s[t-1]
            # w.r.t. softmax input
            #ddense = np.copy(probs)
            #ddense[np.arange(len(Y)),np.argmax(Y,1)] -= 1
            #ddense[np.arange(len(Y)),np.argmax(Y,1)] -=1
            # Softmax classifier's :
            #dW += np.dot(hidden_state.T,ddense)
            #dB += np.sum(ddense,axis = 0, keepdims = True)
            # Output gate :
            dh = np.dot(ddense_hid,self.W_hid.T) + dh_next
            do = dh * np.tanh(cell_state)
            do = do * dsigmoid(output_gate)
            dW_o += np.dot(stack.T,do)
            dB_o += np.sum(do,axis = 0, keepdims = True)
            # Cell state:
            dC = np.copy(dC_next)
            dC += dh * output_gate * activations.dtanh(cell_state)
            dC_bar = dC * input_gate
            dC_bar = dC_bar * dtanh(cell_bar)
            dW_c += np.dot(stack.T,dC_bar)
            dB_c += np.sum(dC_bar,axis = 0, keepdims = True)
            # Input gate:
            di = dC * cell_bar
            di = dsigmoid(input_gate) * di
            dW_i += np.dot(stack.T,di)
            dB_i += np.sum(di,axis = 0,keepdims = True)
            # Forget gate:
            df = dC * C_prev
            df = df * dsigmoid(forget_gate)
            dW_f += np.dot(stack.T,df)
            dB_f += np.sum(df,axis = 0, keepdims = True)
            # Gradient w.r.t. the stacked [x, h_prev] input; only the h_prev
            # slice (last hidden_dim_1 columns) flows to the previous step.
            dz = np.dot(df,self.W_f.T) + np.dot(di,self.W_i.T) + np.dot(dC_bar,self.W_c.T) + np.dot(do,self.W_o.T)
            dh_next = dz[:,-self.hidden_dim_1:]
            dC_next = forget_gate * dC
        # List of gradients :
        grads = [dW,dB,dW_hid,dB_hid,dW_o,dB_o,dW_c,dB_c,dW_i,dB_i,dW_f,dB_f]
        # Clipping gradients anyway
        for grad in grads:
            np.clip(grad, -15, 15, out = grad)
        return h_s[self.seq_len - 1],C_s[self.seq_len -1 ],grads
    def fit(self,X,Y,X_val,y_val,epochs = 50 ,optimizer = 'SGD',verbose = True, crossVal = False):
        """
        Given the traning dataset,their labels and number of epochs
        fitting the model, and measure the performance
        by validating training dataset.

        NOTE(review): any unrecognised `optimizer` string falls through to
        the Adam branch; `crossVal` is accepted but unused here.
        """
        for epoch in range(epochs):
            print(f'Epoch : {epoch + 1}')
            # NOTE(review): assumes exactly 3000 training samples — confirm.
            perm = np.random.permutation(3000)
            # Zero initial hidden/cell states at the start of every epoch.
            h_prev,C_prev = np.zeros((self.batch_size,self.hidden_dim_1)),np.zeros((self.batch_size,self.hidden_dim_1))
            for i in range(round(X.shape[0]/self.batch_size) - 1):
                batch_start = i * self.batch_size
                batch_finish = (i+1) * self.batch_size
                index = perm[batch_start:batch_finish]
                # Feeding random indexes:
                X_feed = X[index]
                y_feed = Y[index]
                # Forward + BPTT + SGD:
                cache_train = self.forward(X_feed,h_prev,C_prev)
                h,c,grads = self.BPTT(cache_train,y_feed)
                if optimizer == 'SGD':
                    self.SGD(grads)
                elif optimizer == 'AdaGrad' :
                    self.AdaGrad(grads)
                elif optimizer == 'RMSprop':
                    self.RMSprop(grads)
                elif optimizer == 'VanillaAdam':
                    self.VanillaAdam(grads)
                else:
                    self.Adam(grads)
                # Hidden state -------> Previous hidden state
                # Cell state ---------> Previous cell state
                h_prev,C_prev = h,c
            # Training metrics calculations:
            # (loss is measured on the last mini-batch only; accuracy on the
            # full training set)
            cross_loss_train = self.CategoricalCrossEntropy(y_feed,cache_train[8][149])
            predictions_train = self.predict(X)
            acc_train = metrics.accuracy(np.argmax(Y,1),predictions_train)
            # Validation metrics calculations:
            test_prevs = np.zeros((X_val.shape[0],self.hidden_dim_1))
            _,__,___,____,_____,______,_______,________,probs_test,a,b = self.forward(X_val,test_prevs,test_prevs)
            cross_loss_val = self.CategoricalCrossEntropy(y_val,probs_test[149])
            predictions_val = np.argmax(probs_test[149],1)
            acc_val = metrics.accuracy(np.argmax(y_val,1),predictions_val)
            if verbose:
                print(f"[{epoch + 1}/{epochs}] ------> Training :  Accuracy : {acc_train}")
                print(f"[{epoch + 1}/{epochs}] ------> Training :  Loss     : {cross_loss_train}")
                print('______________________________________________________________________________________\n')
                print(f"[{epoch + 1}/{epochs}] ------> Testing  :  Accuracy : {acc_val}")
                print(f"[{epoch + 1}/{epochs}] ------> Testing  :  Loss     : {cross_loss_val}")
                print('______________________________________________________________________________________\n')
            self.train_loss.append(cross_loss_train)
            self.test_loss.append(cross_loss_val)
            self.train_acc.append(acc_train)
            self.test_acc.append(acc_val)
    def params(self):
        """
        Return all weights/biases in sequential order starting from end in list form.
        (12 tensors; order matches the grads list produced by BPTT.)
        """
        return [self.W,self.B,self.W_hid,self.B_hid,self.W_o,self.B_o,self.W_c,self.B_c,self.W_i,self.B_i,self.W_f,self.B_f]
    def SGD(self,grads):
        """
        Stochastic gradient descent with momentum on mini-batches.

        NOTE(review): the momentum term is subtracted here
        (delta = lr*grad - mom*prev), so successive updates oppose each
        other when mom_coeff > 0; inert for the mom_coeff = 0.0 runs used
        below — confirm the sign before enabling momentum.
        """
        prevs = []
        for param,grad,prev_update in zip(self.params(),grads,self.previous_updates):
            delta = self.learning_rate * grad - self.mom_coeff * prev_update
            param -= delta
            prevs.append(delta)
        self.previous_updates = prevs
        # Slow exponential decay of the learning rate.
        self.learning_rate *= 0.99999
    def AdaGrad(self,grads):
        """
        AdaGrad adaptive optimization algorithm.
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            # Accumulated squared gradients; 1e-6 avoids division by zero.
            self.cache[i] += grad **2
            param += -self.learning_rate * grad / (np.sqrt(self.cache[i]) + 1e-6)
            i += 1
    def RMSprop(self,grads,decay_rate = 0.9):
        """
        RMSprop adaptive optimization algorithm
        (exponentially decayed moving average of squared gradients).
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            self.cache_rmsprop[i] = decay_rate * self.cache_rmsprop[i] + (1-decay_rate) * grad **2
            param += - self.learning_rate * grad / (np.sqrt(self.cache_rmsprop[i])+ 1e-6)
            i += 1
    def VanillaAdam(self,grads,beta1 = 0.9,beta2 = 0.999):
        """
        Adam optimizer, but bias correction is not implemented
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            self.m[i] = beta1 * self.m[i] + (1-beta1) * grad
            self.v[i] = beta2 * self.v[i] + (1-beta2) * grad **2
            param += -self.learning_rate * self.m[i] / (np.sqrt(self.v[i]) + 1e-8)
            i += 1
    def Adam(self,grads,beta1 = 0.9,beta2 = 0.999):
        """
        Adam optimizer, bias correction is implemented.
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            self.m[i] = beta1 * self.m[i] + (1-beta1) * grad
            self.v[i] = beta2 * self.v[i] + (1-beta2) * grad **2
            # Bias-corrected first/second moment estimates.
            m_corrected = self.m[i] / (1-beta1**self.t)
            v_corrected = self.v[i] / (1-beta2**self.t)
            param += -self.learning_rate * m_corrected / (np.sqrt(v_corrected) + 1e-8)
            i += 1
        # Shared timestep advances once per call.
        self.t +=1
    def CategoricalCrossEntropy(self,labels,preds):
        """
        Computes cross entropy between labels and model's predictions
        (mean over the batch axis).
        """
        predictions = np.clip(preds, 1e-12, 1. - 1e-12)
        N = predictions.shape[0]
        return -np.sum(labels * np.log(predictions + 1e-9)) / N
    def predict(self,X):
        """
        Return predictions, (not one hot encoded format)
        """
        # Give zeros to hidden/cell states:
        # (the repeated 7-underscore placeholder below is legal Python —
        # probs is element index 8 of the 11-tuple)
        pasts = np.zeros((X.shape[0],self.hidden_dim_1))
        _,__,___,____,_____,______,_______,_______,probs,a,b = self.forward(X,pasts,pasts)
        return np.argmax(probs[149],axis=1)
    def history(self):
        # Per-epoch metric traces recorded during fit().
        return {'TrainLoss' : self.train_loss,
                'TrainAcc'  : self.train_acc,
                'TestLoss'  : self.test_loss,
                'TestAcc'   : self.test_acc}
# %%
# Two-layer LSTM (LSTM -> FC ReLU -> softmax) trained with Adam.
mutl_layer_lstm = Multi_Layer_LSTM(learning_rate=1e-3,batch_size=32,hidden_dim_1 = 128,hidden_dim_2=64,mom_coeff=0.0)
mutl_layer_lstm.fit(X_train,y_train,X_test,y_test,epochs=15,optimizer='Adam')
# %%
# Compare the multi-layer LSTM against the single-layer baseline.
# NOTE(review): y-axis labels read 'Loss' even on the accuracy plots.
mutl_layer_lstm_history = mutl_layer_lstm.history()
plt.figure()
plt.plot(mutl_layer_lstm_history['TrainAcc'],'-o')
plt.plot(lstm_history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Accuracy over epochs')
plt.legend(['Multi Layer LSTM','LSTM'])
plt.show()
plt.figure()
plt.plot(mutl_layer_lstm_history['TestAcc'],'-o')
plt.plot(lstm_history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Accuracy over epochs')
plt.legend(['Multi Layer LSTM','LSTM'])
plt.show()
plt.figure()
plt.plot(mutl_layer_lstm_history['TrainLoss'],'-o')
plt.plot(lstm_history['TrainLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Loss over epochs')
plt.legend(['Multi Layer LSTM','LSTM'])
plt.show()
plt.figure()
plt.plot(mutl_layer_lstm_history['TestLoss'],'-o')
plt.plot(lstm_history['TestLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.legend(['Multi Layer LSTM','LSTM'])
plt.show()
# %%
# NOTE(review): 'Vanilla' is not a recognised option in fit(); the if/elif
# chain falls through to the Adam branch — confirm this was intended
# (perhaps 'VanillaAdam' was meant). Also note this continues training the
# already-fitted instance rather than a fresh one.
mutl_layer_lstm.fit(X_train,y_train,X_test,y_test,epochs=15,optimizer = 'Vanilla')
# %%
mutl_layer_lstm_history = mutl_layer_lstm.history()
# %%
# Individual metric plots for the second training run.
# NOTE(review): the last three titles say 'Testing Loss' but plot
# TrainLoss / TestAcc / TrainAcc respectively.
plt.figure()
plt.plot(mutl_layer_lstm_history['TestLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.show()
plt.figure()
plt.plot(mutl_layer_lstm_history['TrainLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.show()
plt.figure()
plt.plot(mutl_layer_lstm_history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.show()
plt.figure()
plt.plot(mutl_layer_lstm_history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.show()
class GRU(object):
    """
    Gated recurrent unit network; encapsulates all necessary logic for
    training, the hyperparameters and the architecture of the network
    (single GRU layer + softmax classifier at the final timestep).
    """
    def __init__(self,input_dim = 3,hidden_dim = 128,output_class = 6,seq_len = 150,batch_size = 32,learning_rate = 1e-1,mom_coeff = 0.85):
        """
        Initialization of weights/biases and other configurable parameters.
        """
        np.random.seed(32)
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        # Unfold case T = 150 :
        self.seq_len = seq_len
        self.output_class = output_class
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.mom_coeff = mom_coeff
        # Xavier uniform scaler :
        Xavier = lambda fan_in,fan_out : math.sqrt(6/(fan_in + fan_out))
        lim1 = Xavier(self.input_dim,self.hidden_dim)
        lim1_hid = Xavier(self.hidden_dim,self.hidden_dim)
        # Update gate (W: input->hidden, U: hidden->hidden, B: bias).
        self.W_z = np.random.uniform(-lim1,lim1,(self.input_dim,self.hidden_dim))
        self.U_z = np.random.uniform(-lim1_hid,lim1_hid,(self.hidden_dim,self.hidden_dim))
        self.B_z = np.random.uniform(-lim1,lim1,(1,self.hidden_dim))
        # Reset gate.
        self.W_r = np.random.uniform(-lim1,lim1,(self.input_dim,self.hidden_dim))
        self.U_r = np.random.uniform(-lim1_hid,lim1_hid,(self.hidden_dim,self.hidden_dim))
        self.B_r = np.random.uniform(-lim1,lim1,(1,self.hidden_dim))
        # Candidate hidden state ("current memory content").
        self.W_h = np.random.uniform(-lim1,lim1,(self.input_dim,self.hidden_dim))
        self.U_h = np.random.uniform(-lim1_hid,lim1_hid,(self.hidden_dim,self.hidden_dim))
        self.B_h = np.random.uniform(-lim1,lim1,(1,self.hidden_dim))
        # Softmax classifier parameters.
        lim2 = Xavier(self.hidden_dim,self.output_class)
        self.W = np.random.uniform(-lim2,lim2,(self.hidden_dim,self.output_class))
        self.B = np.random.uniform(-lim2,lim2,(1,self.output_class))
        # To keep track loss and accuracy score :
        self.train_loss,self.test_loss,self.train_acc,self.test_acc = [],[],[],[]
        # To keep previous updates in momentum :
        # BUG FIX: params()/grads hold 11 tensors, but this buffer held only
        # 10 slots; zip() in SGD therefore stopped early and the last
        # parameter (B_h) was silently never updated by SGD. Sized to 11 to
        # match params().
        self.previous_updates = [0] * 11
        # For AdaGrad:
        self.cache = [0] * 11
        self.cache_rmsprop = [0] * 11
        # For Adam (first/second moment estimates and timestep counter):
        self.m = [0] * 11
        self.v = [0] * 11
        self.t = 1
    def cell_forward(self,X,h_prev):
        """
        One GRU timestep: compute update gate, reset gate, candidate state
        and the new hidden state, then classify the hidden state by softmax.
        Returns every intermediate needed later by BPTT.
        """
        # Update gate:
        update_gate = activations.sigmoid(np.dot(X,self.W_z) + np.dot(h_prev,self.U_z) + self.B_z)
        # Reset gate:
        reset_gate = activations.sigmoid(np.dot(X,self.W_r) + np.dot(h_prev,self.U_r) + self.B_r)
        # Current memory content:
        h_hat = np.tanh(np.dot(X,self.W_h) + np.dot(np.multiply(reset_gate,h_prev),self.U_h) + self.B_h)
        # Hidden state (convex mix of previous state and candidate):
        hidden_state = np.multiply(update_gate,h_prev) + np.multiply((1-update_gate),h_hat)
        # Classifiers (Softmax) :
        dense = np.dot(hidden_state,self.W) + self.B
        probs = activations.softmax(dense)
        return (update_gate,reset_gate,h_hat,hidden_state,dense,probs)
    def forward(self,X,h_prev):
        # Per-timestep caches keyed by t; index -1 holds the initial state.
        x_s,z_s,r_s,h_hat = {},{},{},{}
        h_s = {}
        y_s,p_s = {},{}
        h_s[-1] = np.copy(h_prev)
        for t in range(self.seq_len):
            x_s[t] = X[:,t,:]
            z_s[t], r_s[t], h_hat[t], h_s[t], y_s[t], p_s[t] = self.cell_forward(x_s[t],h_s[t-1])
        return (x_s,z_s, r_s, h_hat, h_s, y_s, p_s)
    def BPTT(self,outs,Y):
        # Backpropagation through time over the cached forward pass `outs`.
        x_s,z_s, r_s, h_hat, h_s, y_s, p_s = outs
        dW_z, dW_r,dW_h, dW = np.zeros_like(self.W_z), np.zeros_like(self.W_r), np.zeros_like(self.W_h),np.zeros_like(self.W)
        dU_z, dU_r,dU_h, = np.zeros_like(self.U_z), np.zeros_like(self.U_r), np.zeros_like(self.U_h)
        dB_z, dB_r,dB_h,dB = np.zeros_like(self.B_z), np.zeros_like(self.B_r),np.zeros_like(self.B_h),np.zeros_like(self.B)
        dh_next = np.zeros_like(h_s[0])
        # Loss gradient at the final timestep (149 = seq_len - 1): softmax
        # + cross-entropy gives probs - one_hot(Y).
        ddense = np.copy(p_s[149])
        ddense[np.arange(len(Y)),np.argmax(Y,1)] -= 1
        # Softmax classifier's :
        dW = np.dot(h_s[149].T,ddense)
        dB = np.sum(ddense,axis = 0, keepdims = True)
        # Backprop through time:
        # NOTE(review): the loop stops at t=1 (t=0 excluded), and the same
        # final-timestep ddense is re-injected at every step — confirm this
        # matches the intended loss (classification at t=149 only).
        for t in reversed(range(1,self.seq_len)):
            # Current memory state :
            dh = np.dot(ddense,self.W.T) + dh_next
            dh_hat = dh * (1-z_s[t])
            dh_hat = dh_hat * dtanh(h_hat[t])
            dW_h += np.dot(x_s[t].T,dh_hat)
            dU_h += np.dot((r_s[t] * h_s[t-1]).T,dh_hat)
            dB_h += np.sum(dh_hat,axis = 0, keepdims = True)
            # Reset gate:
            dr_1 = np.dot(dh_hat,self.U_h.T)
            dr = dr_1 * h_s[t-1]
            dr = dr * dsigmoid(r_s[t])
            dW_r += np.dot(x_s[t].T,dr)
            dU_r += np.dot(h_s[t-1].T,dr)
            dB_r += np.sum(dr,axis = 0, keepdims = True)
            # Update gate:
            dz = dh * (h_s[t-1] - h_hat[t])
            dz = dz * dsigmoid(z_s[t])
            dW_z += np.dot(x_s[t].T,dz)
            dU_z += np.dot(h_s[t-1].T,dz)
            dB_z += np.sum(dz,axis = 0, keepdims = True)
            # Gradient flowing to the previous hidden state:
            dh_next = np.dot(dz,self.U_z.T) + (dh * z_s[t]) + (dr_1 * r_s[t]) + np.dot(dr,self.U_r.T)
        # List of gradients (order matches params()):
        grads = [dW,dB,dW_z,dU_z,dB_z,dW_r,dU_r,dB_r,dW_h,dU_h,dB_h]
        # Clipping gradients anyway
        for grad in grads:
            np.clip(grad, -15, 15, out = grad)
        return h_s[self.seq_len - 1],grads
    def fit(self,X,Y,X_val,y_val,epochs = 50 ,optimizer = 'SGD',verbose = True, crossVal = False):
        """
        Given the training dataset, their labels and number of epochs,
        fit the model and measure performance on the validation split.

        NOTE(review): any unrecognised `optimizer` string falls through to
        the Adam branch; `crossVal` is accepted but unused here.
        """
        for epoch in range(epochs):
            print(f'Epoch : {epoch + 1}')
            # NOTE(review): assumes exactly 3000 training samples — confirm.
            perm = np.random.permutation(3000)
            # Zero initial hidden state at the start of every epoch.
            h_prev = np.zeros((self.batch_size,self.hidden_dim))
            for i in range(round(X.shape[0]/self.batch_size) - 1):
                batch_start = i * self.batch_size
                batch_finish = (i+1) * self.batch_size
                index = perm[batch_start:batch_finish]
                # Feeding random indexes:
                X_feed = X[index]
                y_feed = Y[index]
                # Forward + BPTT + parameter update:
                cache_train = self.forward(X_feed,h_prev)
                h,grads = self.BPTT(cache_train,y_feed)
                if optimizer == 'SGD':
                    self.SGD(grads)
                elif optimizer == 'AdaGrad' :
                    self.AdaGrad(grads)
                elif optimizer == 'RMSprop':
                    self.RMSprop(grads)
                elif optimizer == 'VanillaAdam':
                    self.VanillaAdam(grads)
                else:
                    self.Adam(grads)
                # Hidden state -------> Previous hidden state
                h_prev= h
            # Training metrics calculations:
            # (loss measured on the last mini-batch; accuracy on the full
            # training set)
            cross_loss_train = self.CategoricalCrossEntropy(y_feed,cache_train[6][149])
            predictions_train = self.predict(X)
            acc_train = metrics.accuracy(np.argmax(Y,1),predictions_train)
            # Validation metrics calculations:
            test_prevs = np.zeros((X_val.shape[0],self.hidden_dim))
            _,__,___,____,_____,______,probs_test = self.forward(X_val,test_prevs)
            cross_loss_val = self.CategoricalCrossEntropy(y_val,probs_test[149])
            predictions_val = np.argmax(probs_test[149],1)
            acc_val = metrics.accuracy(np.argmax(y_val,1),predictions_val)
            if verbose:
                print(f"[{epoch + 1}/{epochs}] ------> Training :  Accuracy : {acc_train}")
                print(f"[{epoch + 1}/{epochs}] ------> Training :  Loss     : {cross_loss_train}")
                print('______________________________________________________________________________________\n')
                print(f"[{epoch + 1}/{epochs}] ------> Testing  :  Accuracy : {acc_val}")
                print(f"[{epoch + 1}/{epochs}] ------> Testing  :  Loss     : {cross_loss_val}")
                print('______________________________________________________________________________________\n')
            self.train_loss.append(cross_loss_train)
            self.test_loss.append(cross_loss_val)
            self.train_acc.append(acc_train)
            self.test_acc.append(acc_val)
    def params(self):
        """
        Return all weights/biases in sequential order starting from end in
        list form (11 tensors; order matches the grads list from BPTT).
        """
        return [self.W,self.B,self.W_z,self.U_z,self.B_z,self.W_r,self.U_r,self.B_r,self.W_h,self.U_h,self.B_h]
    def SGD(self,grads):
        """
        Stochastic gradient descent with classical momentum on mini-batches.

        BUG FIX: the momentum term was subtracted (lr*grad - mom*prev),
        which made consecutive updates oppose each other for mom_coeff > 0;
        classical momentum adds it. The visible runs use mom_coeff = 0.0,
        so their behavior is unchanged.
        """
        prevs = []
        for param,grad,prev_update in zip(self.params(),grads,self.previous_updates):
            delta = self.learning_rate * grad + self.mom_coeff * prev_update
            param -= delta
            prevs.append(delta)
        self.previous_updates = prevs
        # Slow exponential decay of the learning rate.
        self.learning_rate *= 0.99999
    def AdaGrad(self,grads):
        """
        AdaGrad adaptive optimization algorithm.
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            # Accumulated squared gradients; 1e-6 avoids division by zero.
            self.cache[i] += grad **2
            param += -self.learning_rate * grad / (np.sqrt(self.cache[i]) + 1e-6)
            i += 1
    def RMSprop(self,grads,decay_rate = 0.9):
        """
        RMSprop adaptive optimization algorithm
        (exponentially decayed moving average of squared gradients).
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            self.cache_rmsprop[i] = decay_rate * self.cache_rmsprop[i] + (1-decay_rate) * grad **2
            param += - self.learning_rate * grad / (np.sqrt(self.cache_rmsprop[i])+ 1e-6)
            i += 1
    def VanillaAdam(self,grads,beta1 = 0.9,beta2 = 0.999):
        """
        Adam optimizer, but bias correction is not implemented
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            self.m[i] = beta1 * self.m[i] + (1-beta1) * grad
            self.v[i] = beta2 * self.v[i] + (1-beta2) * grad **2
            param += -self.learning_rate * self.m[i] / (np.sqrt(self.v[i]) + 1e-8)
            i += 1
    def Adam(self,grads,beta1 = 0.9,beta2 = 0.999):
        """
        Adam optimizer, bias correction is implemented.
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            self.m[i] = beta1 * self.m[i] + (1-beta1) * grad
            self.v[i] = beta2 * self.v[i] + (1-beta2) * grad **2
            # Bias-corrected first/second moment estimates.
            m_corrected = self.m[i] / (1-beta1**self.t)
            v_corrected = self.v[i] / (1-beta2**self.t)
            param += -self.learning_rate * m_corrected / (np.sqrt(v_corrected) + 1e-8)
            i += 1
        # Shared timestep advances once per call.
        self.t +=1
    def CategoricalCrossEntropy(self,labels,preds):
        """
        Computes cross entropy between labels and model's predictions
        (mean over the batch axis).
        """
        predictions = np.clip(preds, 1e-12, 1. - 1e-12)
        N = predictions.shape[0]
        return -np.sum(labels * np.log(predictions + 1e-9)) / N
    def predict(self,X):
        """
        Return predictions, (not one hot encoded format)
        """
        # Give zeros to hidden/cell states:
        pasts = np.zeros((X.shape[0],self.hidden_dim))
        _,__,___,____,_____,______,probs = self.forward(X,pasts)
        return np.argmax(probs[149],axis=1)
    def history(self):
        # Per-epoch metric traces recorded during fit().
        return {'TrainLoss' : self.train_loss,
                'TrainAcc'  : self.train_acc,
                'TestLoss'  : self.test_loss,
                'TestAcc'   : self.test_acc}
# %%
# Single-layer GRU trained with RMSprop (momentum disabled).
gru = GRU(hidden_dim=128,learning_rate=1e-3,batch_size=32,mom_coeff=0.0)
# %%
gru.fit(X_train,y_train,X_test,y_test,epochs = 15,optimizer = 'RMSprop')
# %%
gru_history = gru.history()
# %%
# For figure 97:
plt.figure()
plt.plot(gru_history['TrainLoss'],'-o')
plt.plot(gru_history['TestLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Loss over epochs')
plt.legend(['Train Loss','Test Loss'])
plt.show()
plt.figure()
plt.plot(gru_history['TrainAcc'],'-o')
plt.plot(gru_history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Accuracy over epochs')
plt.legend(['Train Acc','Test Acc'])
plt.show()
# %%
# For figure 98:
# NOTE(review): `multi_layer_gru` is not defined anywhere above this point
# in the file (the Multi_layer_GRU class is declared further below, and no
# instantiation is visible) — this cell will NameError unless it is run
# after a later cell that creates it. Confirm cell execution order.
multi_layer_gru_history = multi_layer_gru.history()
plt.figure()
plt.plot(multi_layer_gru_history['TrainAcc'],'-o')
plt.plot(gru_history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Accuracy over epochs')
plt.legend(['Multi Layer GRU','GRU'])
plt.show()
plt.figure()
plt.plot(multi_layer_gru_history['TestAcc'],'-o')
plt.plot(gru_history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Accuracy over epochs')
plt.legend(['Multi Layer GRU','GRU'])
plt.show()
plt.figure()
plt.plot(multi_layer_gru_history['TrainLoss'],'-o')
plt.plot(gru_history['TrainLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Loss over epochs')
plt.legend(['Multi Layer GRU','GRU'])
plt.show()
plt.figure()
plt.plot(multi_layer_gru_history['TestLoss'],'-o')
plt.plot(gru_history['TestLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.legend(['Multi Layer GRU','GRU'])
plt.show()
# %%
# For figure 99:
# GRU vs LSTM vs RNN comparison.
# NOTE(review): three_layer_rnn must be defined earlier in the file (not
# visible in this chunk).
three_layer_rnn_history = three_layer_rnn.history()
plt.figure()
plt.plot(gru_history['TrainAcc'],'-o')
plt.plot(lstm_history['TrainAcc'],'-o')
plt.plot(three_layer_rnn_history['TrainAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Accuracy over epochs')
plt.legend(['GRU','LSTM','RNN'])
plt.show()
plt.figure()
plt.plot(gru_history['TestAcc'],'-o')
plt.plot(lstm_history['TestAcc'],'-o')
plt.plot(three_layer_rnn_history['TestAcc'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Accuracy over epochs')
plt.legend(['GRU','LSTM','RNN'])
plt.show()
plt.figure()
plt.plot(gru_history['TrainLoss'],'-o')
plt.plot(lstm_history['TrainLoss'],'-o')
plt.plot(three_layer_rnn_history['TrainLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Training Loss over epochs')
plt.legend(['GRU','LSTM','RNN'])
plt.show()
plt.figure()
plt.plot(gru_history['TestLoss'],'-o')
plt.plot(lstm_history['TestLoss'],'-o')
plt.plot(three_layer_rnn_history['TestLoss'],'-o')
plt.xlabel('# of epochs')
plt.ylabel('Loss')
plt.title('Testing Loss over epochs')
plt.legend(['GRU','LSTM','RNN'])
plt.show()
# %%
# Confusion matrices for the trained GRU on both splits.
train_preds_gru = gru.predict(X_train)
test_preds_gru = gru.predict(X_test)
confusion_mat_train_gru = metrics.confusion_matrix(np.argmax(y_train,1),train_preds_gru)
confusion_mat_test_gru = metrics.confusion_matrix(np.argmax(y_test,1),test_preds_gru)
# Human-readable names for the six activity classes.
body_movements = ['downstairs','jogging','sitting','standing','upstairs','walking']
# NOTE(review): .columns/.index assignment implies a DataFrame-like return
# from metrics.confusion_matrix — confirm in the project's metrics module.
confusion_mat_train_gru.columns = body_movements
confusion_mat_train_gru.index = body_movements
confusion_mat_test_gru.columns = body_movements
confusion_mat_test_gru.index = body_movements
sns.heatmap(confusion_mat_train_gru/np.sum(confusion_mat_train_gru), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
sns.heatmap(confusion_mat_test_gru/np.sum(confusion_mat_test_gru), annot=True,
fmt='.2%',cmap = 'Blues')
plt.show()
# %%
class Multi_layer_GRU(object):
    """
    Gater recurrent unit, encapsulates all necessary logic for training, then built the hyperparameters and architecture of the network.

    Architecture: GRU cell -> dense ReLU layer -> softmax classifier,
    trained with backpropagation through time (BPTT) and a choice of
    first-order optimizers (SGD+momentum, AdaGrad, RMSprop, Adam variants).
    """
    def __init__(self,input_dim = 3,hidden_dim_1 = 128,hidden_dim_2 = 64,output_class = 6,seq_len = 150,batch_size = 32,learning_rate = 1e-1,mom_coeff = 0.85):
        """
        Initialization of weights/biases and other configurable parameters.

        input_dim     -- features per timestep (3 accelerometer axes)
        hidden_dim_1  -- GRU hidden state size
        hidden_dim_2  -- dense (ReLU) layer size
        output_class  -- number of target classes (softmax width)
        seq_len       -- timesteps per sample the network is unrolled over
        batch_size    -- mini-batch size used in fit()
        learning_rate -- initial step size (decayed slightly in SGD)
        mom_coeff     -- momentum coefficient for SGD
        """
        np.random.seed(150)
        self.input_dim = input_dim
        self.hidden_dim_1 = hidden_dim_1
        self.hidden_dim_2 = hidden_dim_2
        # Unfold case T = 150 :
        self.seq_len = seq_len
        self.output_class = output_class
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.mom_coeff = mom_coeff
        # Xavier uniform scaler :
        Xavier = lambda fan_in,fan_out : math.sqrt(6/(fan_in + fan_out))
        lim1 = Xavier(self.input_dim,self.hidden_dim_1)
        lim1_hid = Xavier(self.hidden_dim_1,self.hidden_dim_1)
        # GRU gate parameters: W_* act on the input, U_* on the previous
        # hidden state, B_* are biases (z = update, r = reset, h = candidate).
        self.W_z = np.random.uniform(-lim1,lim1,(self.input_dim,self.hidden_dim_1))
        self.U_z = np.random.uniform(-lim1_hid,lim1_hid,(self.hidden_dim_1,self.hidden_dim_1))
        self.B_z = np.random.uniform(-lim1,lim1,(1,self.hidden_dim_1))
        self.W_r = np.random.uniform(-lim1,lim1,(self.input_dim,self.hidden_dim_1))
        self.U_r = np.random.uniform(-lim1_hid,lim1_hid,(self.hidden_dim_1,self.hidden_dim_1))
        self.B_r = np.random.uniform(-lim1,lim1,(1,self.hidden_dim_1))
        self.W_h = np.random.uniform(-lim1,lim1,(self.input_dim,self.hidden_dim_1))
        self.U_h = np.random.uniform(-lim1_hid,lim1_hid,(self.hidden_dim_1,self.hidden_dim_1))
        self.B_h = np.random.uniform(-lim1,lim1,(1,self.hidden_dim_1))
        # Dense hidden (ReLU) layer parameters.
        lim2_hid = Xavier(self.hidden_dim_1,self.hidden_dim_2)
        self.W_hid = np.random.uniform(-lim2_hid,lim2_hid,(self.hidden_dim_1,self.hidden_dim_2))
        self.B_hid = np.random.uniform(-lim2_hid,lim2_hid,(1,self.hidden_dim_2))
        # Softmax output layer parameters.
        lim2 = Xavier(self.hidden_dim_2,self.output_class)
        self.W = np.random.uniform(-lim2,lim2,(self.hidden_dim_2,self.output_class))
        self.B = np.random.uniform(-lim2,lim2,(1,self.output_class))
        # To keep track loss and accuracy score :
        self.train_loss,self.test_loss,self.train_acc,self.test_acc = [],[],[],[]
        # To keep previous updates in momentum :
        # (13 slots: one per parameter array returned by params())
        self.previous_updates = [0] * 13
        # For AdaGrad:
        self.cache = [0] * 13
        self.cache_rmsprop = [0] * 13
        # Adam first/second moment accumulators and timestep counter.
        self.m = [0] * 13
        self.v = [0] * 13
        self.t = 1

    def cell_forward(self,X,h_prev):
        """One GRU timestep plus the dense/softmax head.

        X      -- (batch, input_dim) input at this timestep
        h_prev -- (batch, hidden_dim_1) previous hidden state
        Returns the tuple of all intermediate activations (needed by BPTT).
        """
        # Update gate:
        update_gate = activations.sigmoid(np.dot(X,self.W_z) + np.dot(h_prev,self.U_z) + self.B_z)
        # Reset gate:
        reset_gate = activations.sigmoid(np.dot(X,self.W_r) + np.dot(h_prev,self.U_r) + self.B_r)
        # Current memory content:
        h_hat = np.tanh(np.dot(X,self.W_h) + np.dot(np.multiply(reset_gate,h_prev),self.U_h) + self.B_h)
        # Hidden state: convex combination of previous state and candidate.
        hidden_state = np.multiply(update_gate,h_prev) + np.multiply((1-update_gate),h_hat)
        # Hidden MLP:
        hid_dense = np.dot(hidden_state,self.W_hid) + self.B_hid
        relu = activations.ReLU(hid_dense)
        # Classifiers (Softmax) :
        dense = np.dot(relu,self.W) + self.B
        probs = activations.softmax(dense)
        return (update_gate,reset_gate,h_hat,hidden_state,hid_dense,relu,dense,probs)

    def forward(self,X,h_prev):
        """Unroll the GRU over the full sequence.

        X is (batch, seq_len, input_dim); returns per-timestep dicts of every
        intermediate (inputs, gates, states, dense activations, probs).
        """
        x_s,z_s,r_s,h_hat = {},{},{},{}
        h_s = {}
        hd_s,relu_s = {},{}
        y_s,p_s = {},{}
        # h_s[-1] seeds the recurrence with the caller-provided state.
        h_s[-1] = np.copy(h_prev)
        for t in range(self.seq_len):
            x_s[t] = X[:,t,:]
            z_s[t], r_s[t], h_hat[t], h_s[t],hd_s[t],relu_s[t], y_s[t], p_s[t] = self.cell_forward(x_s[t],h_s[t-1])
        return (x_s,z_s, r_s, h_hat, h_s, hd_s,relu_s, y_s, p_s)

    def BPTT(self,outs,Y):
        """Backpropagation through time over one mini-batch.

        outs -- the tuple returned by forward()
        Y    -- one-hot labels, shape (batch, output_class)
        Returns (last hidden state, list of 13 clipped gradients) ordered
        like params().
        NOTE(review): timestep index 149 is hard-coded below, i.e. the loss
        is applied only at the last step and assumes seq_len == 150.
        """
        x_s,z_s, r_s, h_hat, h_s, hd_s,relu_s, y_s, p_s = outs
        dW_z, dW_r,dW_h, dW = np.zeros_like(self.W_z), np.zeros_like(self.W_r), np.zeros_like(self.W_h),np.zeros_like(self.W)
        dW_hid = np.zeros_like(self.W_hid)
        dU_z, dU_r,dU_h = np.zeros_like(self.U_z), np.zeros_like(self.U_r), np.zeros_like(self.U_h)
        dB_z, dB_r,dB_h,dB = np.zeros_like(self.B_z), np.zeros_like(self.B_r),np.zeros_like(self.B_h),np.zeros_like(self.B)
        dB_hid = np.zeros_like(self.B_hid)
        dh_next = np.zeros_like(h_s[0])
        # w.r.t. softmax input: softmax+cross-entropy gradient is (p - y).
        ddense = np.copy(p_s[149])
        ddense[np.arange(len(Y)),np.argmax(Y,1)] -= 1
        #ddense[np.argmax(Y,1)] -=1
        #ddense = y_s[149] - Y
        # Softmax classifier's :
        dW = np.dot(relu_s[149].T,ddense)
        dB = np.sum(ddense,axis = 0, keepdims = True)
        # Dense ReLU layer gradient (only from the final timestep).
        ddense_hid = np.dot(ddense,self.W.T) * activations.dReLU(hd_s[149])
        dW_hid = np.dot(h_s[149].T,ddense_hid)
        dB_hid = np.sum(ddense_hid,axis = 0, keepdims = True)
        # Backprop through time:
        # NOTE(review): range starts at 1, so the t == 0 step never
        # contributes gradients — confirm this is intentional.
        for t in reversed(range(1,self.seq_len)):
            # Curernt memort state :
            dh = np.dot(ddense_hid,self.W_hid.T) + dh_next
            dh_hat = dh * (1-z_s[t])
            dh_hat = dh_hat * dtanh(h_hat[t])
            dW_h += np.dot(x_s[t].T,dh_hat)
            dU_h += np.dot((r_s[t] * h_s[t-1]).T,dh_hat)
            dB_h += np.sum(dh_hat,axis = 0, keepdims = True)
            # Reset gate:
            dr_1 = np.dot(dh_hat,self.U_h.T)
            dr = dr_1 * h_s[t-1]
            dr = dr * dsigmoid(r_s[t])
            dW_r += np.dot(x_s[t].T,dr)
            dU_r += np.dot(h_s[t-1].T,dr)
            dB_r += np.sum(dr,axis = 0, keepdims = True)
            # Forget gate:
            dz = dh * (h_s[t-1] - h_hat[t])
            dz = dz * dsigmoid(z_s[t])
            dW_z += np.dot(x_s[t].T,dz)
            dU_z += np.dot(h_s[t-1].T,dz)
            dB_z += np.sum(dz,axis = 0, keepdims = True)
            # Nexts: accumulate all paths flowing into h_{t-1}.
            dh_next = np.dot(dz,self.U_z.T) + (dh * z_s[t]) + (dr_1 * r_s[t]) + np.dot(dr,self.U_r.T)
        # List of gradients :
        grads = [dW,dB,dW_hid,dB_hid,dW_z,dU_z,dB_z,dW_r,dU_r,dB_r,dW_h,dU_h,dB_h]
        # Clipping gradients anyway
        for grad in grads:
            np.clip(grad, -15, 15, out = grad)
        return h_s[self.seq_len - 1],grads

    def fit(self,X,Y,X_val,y_val,epochs = 50 ,optimizer = 'SGD',verbose = True, crossVal = False):
        """
        Given the traning dataset,their labels and number of epochs
        fitting the model, and measure the performance
        by validating training dataset.
        """
        for epoch in range(epochs):
            print(f'Epoch : {epoch + 1}')
            # NOTE(review): hard-coded 3000 assumes the training set has
            # exactly 3000 samples — verify against X.shape[0].
            perm = np.random.permutation(3000)
            # Equate 0 in every epoch:
            h_prev = np.zeros((self.batch_size,self.hidden_dim_1))
            for i in range(round(X.shape[0]/self.batch_size) - 1):
                batch_start = i * self.batch_size
                batch_finish = (i+1) * self.batch_size
                index = perm[batch_start:batch_finish]
                # Feeding random indexes:
                X_feed = X[index]
                y_feed = Y[index]
                # Forward + BPTT + Optimization:
                cache_train = self.forward(X_feed,h_prev)
                h,grads = self.BPTT(cache_train,y_feed)
                if optimizer == 'SGD':
                    self.SGD(grads)
                elif optimizer == 'AdaGrad' :
                    self.AdaGrad(grads)
                elif optimizer == 'RMSprop':
                    self.RMSprop(grads)
                elif optimizer == 'VanillaAdam':
                    self.VanillaAdam(grads)
                else:
                    self.Adam(grads)
                # Hidden state -------> Previous hidden state
                h_prev = h
            # Training metrics calculations:
            cross_loss_train = self.CategoricalCrossEntropy(y_feed,cache_train[8][149])
            predictions_train = self.predict(X)
            acc_train = metrics.accuracy(np.argmax(Y,1),predictions_train)
            # Validation metrics calculations:
            test_prevs = np.zeros((X_val.shape[0],self.hidden_dim_1))
            _,__,___,____,_____,______,_______,________,probs_test = self.forward(X_val,test_prevs)
            cross_loss_val = self.CategoricalCrossEntropy(y_val,probs_test[149])
            predictions_val = np.argmax(probs_test[149],1)
            acc_val = metrics.accuracy(np.argmax(y_val,1),predictions_val)
            if verbose:
                print(f"[{epoch + 1}/{epochs}] ------> Training :  Accuracy : {acc_train}")
                print(f"[{epoch + 1}/{epochs}] ------> Training :  Loss     : {cross_loss_train}")
                print('______________________________________________________________________________________\n')
                print(f"[{epoch + 1}/{epochs}] ------> Testing  :  Accuracy : {acc_val}")
                print(f"[{epoch + 1}/{epochs}] ------> Testing  :  Loss     : {cross_loss_val}")
                print('______________________________________________________________________________________\n')
            self.train_loss.append(cross_loss_train)
            self.test_loss.append(cross_loss_val)
            self.train_acc.append(acc_train)
            self.test_acc.append(acc_val)

    def params(self):
        """
        Return all weights/biases in sequential order starting from end in list form.
        """
        return [self.W,self.B,self.W_hid,self.B_hid,self.W_z,self.U_z,self.B_z,self.W_r,self.U_r,self.B_r,self.W_h,self.U_h,self.B_h]

    def SGD(self,grads):
        """
        Stochastic gradient descent with momentum on mini-batches.
        """
        prevs = []
        for param,grad,prev_update in zip(self.params(),grads,self.previous_updates):
            delta = self.learning_rate * grad + self.mom_coeff * prev_update
            param -= delta
            prevs.append(delta)
        self.previous_updates = prevs
        # Slow multiplicative learning-rate decay per call.
        self.learning_rate *= 0.99999

    def AdaGrad(self,grads):
        """
        AdaGrad adaptive optimization algorithm.
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            self.cache[i] += grad **2
            param += -self.learning_rate * grad / (np.sqrt(self.cache[i]) + 1e-6)
            i += 1

    def RMSprop(self,grads,decay_rate = 0.9):
        """
        RMSprop adaptive optimization algorithm
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            self.cache_rmsprop[i] = decay_rate * self.cache_rmsprop[i] + (1-decay_rate) * grad **2
            param += - self.learning_rate * grad / (np.sqrt(self.cache_rmsprop[i])+ 1e-6)
            i += 1

    def VanillaAdam(self,grads,beta1 = 0.9,beta2 = 0.999):
        """
        Adam optimizer, but bias correction is not implemented
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            self.m[i] = beta1 * self.m[i] + (1-beta1) * grad
            self.v[i] = beta2 * self.v[i] + (1-beta2) * grad **2
            param += -self.learning_rate * self.m[i] / (np.sqrt(self.v[i]) + 1e-8)
            i += 1

    def Adam(self,grads,beta1 = 0.9,beta2 = 0.999):
        """
        Adam optimizer, bias correction is implemented.
        """
        i = 0
        for param,grad in zip(self.params(),grads):
            self.m[i] = beta1 * self.m[i] + (1-beta1) * grad
            self.v[i] = beta2 * self.v[i] + (1-beta2) * grad **2
            # Bias-corrected first/second moment estimates.
            m_corrected = self.m[i] / (1-beta1**self.t)
            v_corrected = self.v[i] / (1-beta2**self.t)
            param += -self.learning_rate * m_corrected / (np.sqrt(v_corrected) + 1e-8)
            i += 1
        self.t +=1

    def CategoricalCrossEntropy(self,labels,preds):
        """
        Computes cross entropy between labels and model's predictions
        """
        # Clip to avoid log(0).
        predictions = np.clip(preds, 1e-12, 1. - 1e-12)
        N = predictions.shape[0]
        return -np.sum(labels * np.log(predictions + 1e-9)) / N

    def predict(self,X):
        """
        Return predictions, (not one hot encoded format)
        """
        # Give zeros to hidden states:
        pasts = np.zeros((X.shape[0],self.hidden_dim_1))
        _,__,___,____,_____,______,_______,________,probs = self.forward(X,pasts)
        # Class = argmax of last-timestep softmax probabilities.
        return np.argmax(probs[149],axis=1)

    def history(self):
        # Per-epoch metric curves collected by fit().
        return {'TrainLoss' : self.train_loss,
                'TrainAcc'  : self.train_acc,
                'TestLoss'  : self.test_loss,
                'TestAcc'   : self.test_acc}
# %%
# Build the two-layer GRU (128 -> 64 hidden units) with no SGD momentum.
multi_layer_gru = Multi_layer_GRU(hidden_dim_1=128,hidden_dim_2=64,learning_rate=1e-3,mom_coeff=0.0,batch_size=32)
# %%
# Train for 15 epochs with RMSprop, validating on the held-out test split.
multi_layer_gru.fit(X_train,y_train,X_test,y_test,epochs = 15,optimizer = 'RMSprop')
can_kocagil_21602218_hw3(question)
| 39.916454
| 227
| 0.506292
|
07efd382cad29753912a8dca26eb897c239dfb3c
| 2,045
|
py
|
Python
|
wsnsims/flower/cell.py
|
king4arabs/wsnsims2
|
13d4c49ea2ae9de7ea42d4aaa8d117b3368cda20
|
[
"MIT"
] | 2
|
2019-11-01T08:42:42.000Z
|
2020-01-02T00:17:39.000Z
|
wsnsims/flower/cell.py
|
king4arabs/wsnsims2
|
13d4c49ea2ae9de7ea42d4aaa8d117b3368cda20
|
[
"MIT"
] | null | null | null |
wsnsims/flower/cell.py
|
king4arabs/wsnsims2
|
13d4c49ea2ae9de7ea42d4aaa8d117b3368cda20
|
[
"MIT"
] | 1
|
2020-01-02T02:46:17.000Z
|
2020-01-02T02:46:17.000Z
|
import numpy as np
from wsnsims.core import point
def side_length(environment):
    """Return the side length of one grid cell for *environment*.

    The cell side is the communication range divided by sqrt(2), so the
    diagonal of a cell equals the radio range.

    :param environment: simulation environment providing ``comms_range``
    :type environment: core.environment.Environment
    :return: cell side length (pq.meter)
    """
    return environment.comms_range / np.sqrt(2)
class Cell(object):
    """One square cell of the FLOWER grid."""

    # Class-wide counter handing out sequential cell ids.
    count = 0

    def __init__(self, row, column, environment):
        """Create the cell at grid position (row, column).

        :param row: grid row index
        :param column: grid column index
        :param environment: simulation environment
        :type environment: core.environment.Environment
        """
        self.cell_id = Cell.count
        Cell.count += 1

        # Grid coordinates of this cell.
        self.grid_location = np.array([row, column])

        # Physical coordinates of the cell center.
        side = side_length(environment)
        x_pos = column * side + (side / 2.)
        y_pos = row * side + (side / 2.)
        self.location = point.Vec2(np.array([x_pos, y_pos]))  # * pq.meter

        # Segments within radio range of this cell.
        self.segments = []

        # Up to eight immediately adjacent cells.
        self.neighbors = []

        # Count of segments within radio range of any neighbor cell.
        self.signal_hop_count = 0

        # Cell distance to the centroid cell, G.
        self.proximity = 0

        # Virtual-cluster and cluster membership (unassigned == -1).
        self.virtual_cluster_id = -1
        self._cluster_id = -1

    @property
    def cluster_id(self):
        """Numeric id of the cluster this cell belongs to."""
        return self._cluster_id

    @cluster_id.setter
    def cluster_id(self, value):
        self._cluster_id = value

    @property
    def access(self):
        """Number of segments within radio range of this cell."""
        return len(self.segments)

    def __str__(self):
        return "Cell {}".format(self.cell_id)

    def __repr__(self):
        return "C {}".format(self.cell_id)
| 25.246914
| 77
| 0.61467
|
6c2b3109f5602dd1a5d9ada1b6953c78bb1fcb17
| 6,762
|
py
|
Python
|
pygama/io/orca_helper.py
|
rauscher1995/pygama
|
7357e3fb0be7c6712010e4925d863b0f0f843c27
|
[
"Apache-2.0"
] | null | null | null |
pygama/io/orca_helper.py
|
rauscher1995/pygama
|
7357e3fb0be7c6712010e4925d863b0f0f843c27
|
[
"Apache-2.0"
] | null | null | null |
pygama/io/orca_helper.py
|
rauscher1995/pygama
|
7357e3fb0be7c6712010e4925d863b0f0f843c27
|
[
"Apache-2.0"
] | 1
|
2021-12-18T14:43:33.000Z
|
2021-12-18T14:43:33.000Z
|
import plistlib
import sys
import pandas as pd
import numpy as np
def parse_header(xmlfile):
    """Read an ORCA file's plist header.

    Opens the given file for binary read ('rb'), then grabs the first 8 bytes:
    the first 4 bytes (1 long) are the total record length in longs, the next
    4 bytes (1 long) are the header length in bytes.  The plist header is then
    read and parsed.

    Returns (record_length_longs, header_length_bytes, header_dict).
    """
    with open(xmlfile, 'rb') as xmlfile_handle:
        # Read the first two 32-bit words.
        ba = bytearray(xmlfile_handle.read(8))

        big_endian = False if sys.byteorder == "little" else True
        i = from_bytes(ba[:4], big_endian=big_endian)
        j = from_bytes(ba[4:], big_endian=big_endian)

        # Read the next j bytes that hold the plist header.
        ba = bytearray(xmlfile_handle.read(j))

        if sys.version_info[0] < 3:
            # Python 2: readPlistFromBytes does not exist; parse from a string.
            header_string = ba.decode("utf-8")
            header_dict = plistlib.readPlistFromString(header_string)
        else:
            # BUG FIX: plistlib.readPlistFromBytes was deprecated in 3.4 and
            # removed in Python 3.9; plistlib.loads is the supported API.
            header_dict = plistlib.loads(bytes(ba))
        return i, j, header_dict


def from_bytes(data, big_endian=False):
    """Little/big-endian bytes -> unsigned int.

    python2 doesn't have int.from_bytes, so reimplement it for backwards
    compatibility.
    """
    if isinstance(data, str):
        data = bytearray(data)
    if big_endian:
        data = reversed(data)
    num = 0
    for offset, byte in enumerate(data):
        num += byte << (offset * 8)
    return num
def get_run_number(header_dict):
    """Extract the run number from an ORCA header dict.

    Scans the DataChain for the "Run Control" entry; raises ValueError if
    no such entry exists.
    """
    for chain_entry in header_dict["ObjectInfo"]["DataChain"]:
        if "Run Control" not in chain_entry:
            continue
        return chain_entry["Run Control"]["RunNumber"]
    raise ValueError("No run number found in header!")
def get_data_id(header_dict, class_name, super_name):
    """Return the data id for a (class, super) pair in the header.

    Stored as header_dict["dataDescription"][class_name][super_name]["dataId"];
    the raw integer carries the id in its upper bits, hence the shift by 18.
    """
    raw_id = header_dict["dataDescription"][class_name][super_name]["dataId"]
    return raw_id >> 18
def flip_data_ids(header_dict):
    """Invert the header's dataDescription mapping by data id.

    Returns a dict of the form::

        flipped[dataId] = [class_key, [super1, super2, ...]]

    where the super list holds every super name seen for that class; the
    list object is shared between all ids of the same class.
    """
    flipped = dict()
    for class_key, supers in header_dict["dataDescription"].items():
        # One shared list per class, referenced by every id of that class.
        class_supers = []
        for super_key, info in supers.items():
            class_supers.append(super_key)
            flipped[info["dataId"] >> 18] = [class_key, class_supers]
    return flipped
def get_decoder_for_id(header_dict):
    """Map each data id to its decoder name.

    Returns a dictionary that goes ``dict[dataIdNum] = "decoderName"``,
    e.g. ``d[5] = 'ORSIS3302DecoderForEnergy'``.
    """
    decoders = dict()
    for class_key, supers in header_dict["dataDescription"].items():
        for super_key, info in supers.items():
            decoders[info["dataId"] >> 18] = info["decoder"]
    return decoders
def get_object_info(header_dict, class_name):
    """Collect per-card info for every card of *class_name* in the header.

    Returns a DataFrame indexed by (Crate, Card), or None when no card of
    that class exists.  NOTE: does not yet include all parts of the header
    (e.g. AuxHw).
    """
    matching_cards = []
    for crate in header_dict["ObjectInfo"]["Crates"]:
        for card in crate["Cards"]:
            if card["Class Name"] != class_name:
                continue
            # Tag the card with its crate number so it can index the frame.
            card["Crate"] = crate["CrateNumber"]
            matching_cards.append(card)

    if not matching_cards:
        return None

    frame = pd.DataFrame.from_dict(matching_cards)
    frame.set_index(['Crate', 'Card'], inplace=True)
    return frame
def get_next_event(f_in):
    """
    Gets the next event, and some basic information about it.
    Takes the file pointer as input.
    Outputs:
    - event_data: a byte array of the data produced by the card (could be header + data)
    - data_id: identifier for the type of data-taker (i.e. Gretina4M, etc)
    Raises EOFError when the stream runs out of data.
    """
    try:
        # Event header word: 1x 32-bit long (record length + data id packed).
        # BUG FIX: np.fromstring is deprecated for binary data (and raises in
        # newer numpy); np.frombuffer is the supported zero-copy equivalent.
        head = np.frombuffer(f_in.read(4), dtype=np.uint32)
    except Exception as e:
        print(e)
        raise Exception("Failed to read in the event orca header.")

    # Using an array of uint32:
    # low 18 bits: record length in longs (including this header word);
    # upper bits: data id.
    record_length = int(head[0] & 0x3FFFF)
    data_id = int(head[0] >> 18)
    # slot      = int((head[1] >> 16) & 0x1f)
    # crate     = int((head[1] >> 21) & 0xf)
    # reserved  = int(head[1] & 0xFFFF)

    # /* ========== read in the rest of the event data ========== */
    try:
        # record_length is in longs, read() takes bytes; subtract the
        # 4 header bytes already consumed.
        event_data = f_in.read(record_length * 4 - 4)
    except Exception as e:
        print("     No more data...\n")
        print(e)
        raise EOFError

    return event_data, data_id
| 35.036269
| 104
| 0.617125
|
42b61c317c4e3af41140df8e8a3ecfc2558530b4
| 93,412
|
py
|
Python
|
Lib/test/test_math.py
|
olegasics/cpython
|
769d7d0c66c5b86e2dd29b9ce67ac2daaab1bb38
|
[
"0BSD"
] | 7
|
2021-08-07T07:30:24.000Z
|
2022-03-10T22:48:04.000Z
|
Lib/test/test_math.py
|
olegasics/cpython
|
769d7d0c66c5b86e2dd29b9ce67ac2daaab1bb38
|
[
"0BSD"
] | 16
|
2018-06-03T02:04:29.000Z
|
2022-03-01T00:00:50.000Z
|
Lib/test/test_math.py
|
olegasics/cpython
|
769d7d0c66c5b86e2dd29b9ce67ac2daaab1bb38
|
[
"0BSD"
] | 4
|
2021-07-30T16:40:00.000Z
|
2022-01-29T13:08:28.000Z
|
# Python test set -- math module
# XXXX Should not do tests around zero only
from test.support import run_unittest, verbose, requires_IEEE_754
from test import support
import unittest
import itertools
import decimal
import math
import os
import platform
import random
import struct
import sys
# Shared tolerances and special float values used throughout the tests.
eps = 1E-05
NAN = float('nan')
INF = float('inf')
NINF = float('-inf')
FLOAT_MAX = sys.float_info.max
FLOAT_MIN = sys.float_info.min

# detect evidence of double-rounding: fsum is not always correctly
# rounded on machines that suffer from double rounding.
x, y = 1e16, 2.9999  # use temporary values to defeat peephole optimizer
HAVE_DOUBLE_ROUNDING = (x + y == 1e16 + 4)

# locate file with test values (works both when run directly and imported)
if __name__ == '__main__':
    file = sys.argv[0]
else:
    file = __file__
test_dir = os.path.dirname(file) or os.curdir
math_testcases = os.path.join(test_dir, 'math_testcases.txt')
test_file = os.path.join(test_dir, 'cmath_testcases.txt')
def to_ulps(x):
    """Convert a non-NaN float x to an integer, in such a way that
    adjacent floats are converted to adjacent integers.  Then
    abs(ulps(x) - ulps(y)) gives the difference in ulps between two
    floats.

    The results from this function will only make sense on platforms
    where native doubles are represented in IEEE 754 binary64 format.

    Note: 0.0 and -0.0 are converted to 0 and -1, respectively.
    """
    bits = struct.unpack('<q', struct.pack('<d', x))[0]
    # Negative floats pack to negative signed ints; remap so ordering by
    # integer matches ordering by float value.
    return bits if bits >= 0 else ~(bits + 2 ** 63)
# Here's a pure Python version of the math.factorial algorithm, for
# documentation and comparison purposes.
#
# Formula:
#
# factorial(n) = factorial_odd_part(n) << (n - count_set_bits(n))
#
# where
#
# factorial_odd_part(n) = product_{i >= 0} product_{0 < j <= n >> i; j odd} j
#
# The outer product above is an infinite product, but once i >= n.bit_length,
# (n >> i) < 1 and the corresponding term of the product is empty. So only the
# finitely many terms for 0 <= i < n.bit_length() contribute anything.
#
# We iterate downwards from i == n.bit_length() - 1 to i == 0. The inner
# product in the formula above starts at 1 for i == n.bit_length(); for each i
# < n.bit_length() we get the inner product for i from that for i + 1 by
# multiplying by all j in {n >> i+1 < j <= n >> i; j odd}. In Python terms,
# this set is range((n >> i+1) + 1 | 1, (n >> i) + 1 | 1, 2).
def count_set_bits(n):
    """Number of '1' bits in binary expansion of a nonnnegative integer."""
    return bin(n).count("1")


def partial_product(start, stop):
    """Product of integers in range(start, stop, 2), computed recursively.

    start and stop should both be odd, with start <= stop.
    """
    n_terms = (stop - start) >> 1
    if n_terms == 0:
        return 1
    if n_terms == 1:
        return start
    # Split roughly in half, keeping the split point odd.
    split = (start + n_terms) | 1
    return partial_product(start, split) * partial_product(split, stop)


def py_factorial(n):
    """Factorial of nonnegative integer n, via "Binary Split Factorial Formula"
    described at http://www.luschny.de/math/factorial/binarysplitfact.html
    """
    inner = outer = 1
    for shift in reversed(range(n.bit_length())):
        inner *= partial_product((n >> shift + 1) + 1 | 1, (n >> shift) + 1 | 1)
        outer *= inner
    return outer << (n - count_set_bits(n))
def ulp_abs_check(expected, got, ulp_tol, abs_tol):
    """Given finite floats `expected` and `got`, check that they're
    approximately equal to within the given number of ulps or the
    given absolute tolerance, whichever is bigger.

    Returns None on success and an error message on failure.
    """
    ulp_error = abs(to_ulps(expected) - to_ulps(got))
    abs_error = abs(expected - got)

    # Success when either tolerance is satisfied.
    if abs_error <= abs_tol or ulp_error <= ulp_tol:
        return None

    return ("error = {:.3g} ({:d} ulps); "
            "permitted error = {:.3g} or {:d} ulps").format(
        abs_error, ulp_error, abs_tol, ulp_tol)
def parse_mtestfile(fname):
    """Parse a file with test values.

    '--' starts a comment; blank lines, or lines containing only a comment,
    are ignored.  Other lines are expected to have the form

      id fn arg -> expected [flag]*

    Yields (id, fn, arg, expected, flags) tuples.
    """
    with open(fname, encoding="utf-8") as fp:
        for raw in fp:
            # Drop any trailing comment, then skip empty lines.
            line = raw[:raw.index('--')] if '--' in raw else raw
            if not line.strip():
                continue
            lhs, rhs = line.split('->')
            case_id, fn, arg = lhs.split()
            expected, *flags = rhs.split()
            yield (case_id, fn, float(arg), float(expected), flags)
def parse_testfile(fname):
    """Parse a file with complex test values.

    Empty lines or lines starting with -- are ignored.
    Yields (id, fn, arg_real, arg_imag, exp_real, exp_imag, flags).
    """
    with open(fname, encoding="utf-8") as fp:
        for line in fp:
            # Skip comment lines and blank lines.
            if line.startswith('--') or not line.strip():
                continue
            lhs, rhs = line.split('->')
            case_id, fn, arg_real, arg_imag = lhs.split()
            exp_real, exp_imag, *flags = rhs.split()
            yield (case_id, fn,
                   float(arg_real), float(arg_imag),
                   float(exp_real), float(exp_imag),
                   flags)
def result_check(expected, got, ulp_tol=5, abs_tol=0.0):
    # Common logic of MathTests.(ftest, test_testcases, test_mtestcases)
    """Compare arguments expected and got, as floats, if either
    is a float, using a tolerance expressed in multiples of
    ulp(expected) or absolutely (if given and greater).

    As a convenience, when neither argument is a float, and for
    non-finite floats, exact equality is demanded. Also, nan==nan
    as far as this function is concerned.

    Returns None on success and an error message on failure.
    """
    # Exact equality also covers strings standing in for exceptions.
    if got == expected:
        return None

    failure = "not equal"

    # Promote a mixed float/int pair (e.g. floor()) to all-float.
    if isinstance(expected, float) and isinstance(got, int):
        got = float(got)
    elif isinstance(got, float) and isinstance(expected, int):
        expected = float(expected)

    if isinstance(expected, float) and isinstance(got, float):
        if math.isnan(expected) and math.isnan(got):
            failure = None  # nan == nan here
        elif not (math.isinf(expected) or math.isinf(got)):
            # Both finite floats: apply the ulp/absolute tolerance check.
            failure = ulp_abs_check(expected, got, ulp_tol, abs_tol)
        # Infinities that aren't equal fall through as a plain failure.

    if failure is None:
        return None
    return "expected {!r}, got {!r} ({})".format(expected, got, failure)
# Helper types exercising the duck-typed numeric protocols (__float__,
# int subclassing, __index__) in the math tests.


class FloatLike:
    # Wraps a value, exposing it only via __float__.
    def __init__(self, value):
        self.value = value

    def __float__(self):
        return self.value


class IntSubclass(int):
    # Plain int subclass; used to check that results keep/lose the subclass.
    pass


# Class providing an __index__ method.
class MyIndexable(object):
    def __init__(self, value):
        self.value = value

    def __index__(self):
        return self.value
class MathTests(unittest.TestCase):
    def ftest(self, name, got, expected, ulp_tol=5, abs_tol=0.0):
        """Compare arguments expected and got, as floats, if either
        is a float, using a tolerance expressed in multiples of
        ulp(expected) or absolutely, whichever is greater.

        As a convenience, when neither argument is a float, and for
        non-finite floats, exact equality is demanded. Also, nan==nan
        in this function.
        """
        # Delegate to the module-level checker; fail with its message.
        failure = result_check(expected, got, ulp_tol, abs_tol)
        if failure is not None:
            self.fail("{}: {}".format(name, failure))
    def testConstants(self):
        # Module constants pi, e and tau against published values.
        # Ref: Abramowitz & Stegun (Dover, 1965)
        self.ftest('pi', math.pi, 3.141592653589793238462643)
        self.ftest('e', math.e, 2.718281828459045235360287)
        self.assertEqual(math.tau, 2*math.pi)
    def testAcos(self):
        # acos: domain [-1, 1]; ValueError outside, NaN propagates.
        self.assertRaises(TypeError, math.acos)
        self.ftest('acos(-1)', math.acos(-1), math.pi)
        self.ftest('acos(0)', math.acos(0), math.pi/2)
        self.ftest('acos(1)', math.acos(1), 0)
        self.assertRaises(ValueError, math.acos, INF)
        self.assertRaises(ValueError, math.acos, NINF)
        self.assertRaises(ValueError, math.acos, 1 + eps)
        self.assertRaises(ValueError, math.acos, -1 - eps)
        self.assertTrue(math.isnan(math.acos(NAN)))
    def testAcosh(self):
        # acosh: domain [1, inf); ValueError below 1, acosh(inf) == inf.
        self.assertRaises(TypeError, math.acosh)
        self.ftest('acosh(1)', math.acosh(1), 0)
        self.ftest('acosh(2)', math.acosh(2), 1.3169578969248168)
        self.assertRaises(ValueError, math.acosh, 0)
        self.assertRaises(ValueError, math.acosh, -1)
        self.assertEqual(math.acosh(INF), INF)
        self.assertRaises(ValueError, math.acosh, NINF)
        self.assertTrue(math.isnan(math.acosh(NAN)))
    def testAsin(self):
        # asin: domain [-1, 1]; ValueError outside, NaN propagates.
        self.assertRaises(TypeError, math.asin)
        self.ftest('asin(-1)', math.asin(-1), -math.pi/2)
        self.ftest('asin(0)', math.asin(0), 0)
        self.ftest('asin(1)', math.asin(1), math.pi/2)
        self.assertRaises(ValueError, math.asin, INF)
        self.assertRaises(ValueError, math.asin, NINF)
        self.assertRaises(ValueError, math.asin, 1 + eps)
        self.assertRaises(ValueError, math.asin, -1 - eps)
        self.assertTrue(math.isnan(math.asin(NAN)))
    def testAsinh(self):
        # asinh: defined on all reals; odd function, preserves infinities.
        self.assertRaises(TypeError, math.asinh)
        self.ftest('asinh(0)', math.asinh(0), 0)
        self.ftest('asinh(1)', math.asinh(1), 0.88137358701954305)
        self.ftest('asinh(-1)', math.asinh(-1), -0.88137358701954305)
        self.assertEqual(math.asinh(INF), INF)
        self.assertEqual(math.asinh(NINF), NINF)
        self.assertTrue(math.isnan(math.asinh(NAN)))
    def testAtan(self):
        # atan: defined on all reals; approaches +/- pi/2 at infinity.
        self.assertRaises(TypeError, math.atan)
        self.ftest('atan(-1)', math.atan(-1), -math.pi/4)
        self.ftest('atan(0)', math.atan(0), 0)
        self.ftest('atan(1)', math.atan(1), math.pi/4)
        self.ftest('atan(inf)', math.atan(INF), math.pi/2)
        self.ftest('atan(-inf)', math.atan(NINF), -math.pi/2)
        self.assertTrue(math.isnan(math.atan(NAN)))
def testAtanh(self):
self.assertRaises(TypeError, math.atan)
self.ftest('atanh(0)', math.atanh(0), 0)
self.ftest('atanh(0.5)', math.atanh(0.5), 0.54930614433405489)
self.ftest('atanh(-0.5)', math.atanh(-0.5), -0.54930614433405489)
self.assertRaises(ValueError, math.atanh, 1)
self.assertRaises(ValueError, math.atanh, -1)
self.assertRaises(ValueError, math.atanh, INF)
self.assertRaises(ValueError, math.atanh, NINF)
self.assertTrue(math.isnan(math.atanh(NAN)))
    def testAtan2(self):
        # atan2(y, x): quadrant-aware arctangent; exercises all special-value
        # combinations (signed zeros, infinities, NaN) per IEEE 754 / C99.
        self.assertRaises(TypeError, math.atan2)
        self.ftest('atan2(-1, 0)', math.atan2(-1, 0), -math.pi/2)
        self.ftest('atan2(-1, 1)', math.atan2(-1, 1), -math.pi/4)
        self.ftest('atan2(0, 1)', math.atan2(0, 1), 0)
        self.ftest('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
        self.ftest('atan2(1, 0)', math.atan2(1, 0), math.pi/2)

        # math.atan2(0, x)
        self.ftest('atan2(0., -inf)', math.atan2(0., NINF), math.pi)
        self.ftest('atan2(0., -2.3)', math.atan2(0., -2.3), math.pi)
        self.ftest('atan2(0., -0.)', math.atan2(0., -0.), math.pi)
        self.assertEqual(math.atan2(0., 0.), 0.)
        self.assertEqual(math.atan2(0., 2.3), 0.)
        self.assertEqual(math.atan2(0., INF), 0.)
        self.assertTrue(math.isnan(math.atan2(0., NAN)))
        # math.atan2(-0, x)
        self.ftest('atan2(-0., -inf)', math.atan2(-0., NINF), -math.pi)
        self.ftest('atan2(-0., -2.3)', math.atan2(-0., -2.3), -math.pi)
        self.ftest('atan2(-0., -0.)', math.atan2(-0., -0.), -math.pi)
        self.assertEqual(math.atan2(-0., 0.), -0.)
        self.assertEqual(math.atan2(-0., 2.3), -0.)
        self.assertEqual(math.atan2(-0., INF), -0.)
        self.assertTrue(math.isnan(math.atan2(-0., NAN)))
        # math.atan2(INF, x)
        self.ftest('atan2(inf, -inf)', math.atan2(INF, NINF), math.pi*3/4)
        self.ftest('atan2(inf, -2.3)', math.atan2(INF, -2.3), math.pi/2)
        self.ftest('atan2(inf, -0.)', math.atan2(INF, -0.0), math.pi/2)
        self.ftest('atan2(inf, 0.)', math.atan2(INF, 0.0), math.pi/2)
        self.ftest('atan2(inf, 2.3)', math.atan2(INF, 2.3), math.pi/2)
        self.ftest('atan2(inf, inf)', math.atan2(INF, INF), math.pi/4)
        self.assertTrue(math.isnan(math.atan2(INF, NAN)))
        # math.atan2(NINF, x)
        self.ftest('atan2(-inf, -inf)', math.atan2(NINF, NINF), -math.pi*3/4)
        self.ftest('atan2(-inf, -2.3)', math.atan2(NINF, -2.3), -math.pi/2)
        self.ftest('atan2(-inf, -0.)', math.atan2(NINF, -0.0), -math.pi/2)
        self.ftest('atan2(-inf, 0.)', math.atan2(NINF, 0.0), -math.pi/2)
        self.ftest('atan2(-inf, 2.3)', math.atan2(NINF, 2.3), -math.pi/2)
        self.ftest('atan2(-inf, inf)', math.atan2(NINF, INF), -math.pi/4)
        self.assertTrue(math.isnan(math.atan2(NINF, NAN)))
        # math.atan2(+finite, x)
        self.ftest('atan2(2.3, -inf)', math.atan2(2.3, NINF), math.pi)
        self.ftest('atan2(2.3, -0.)', math.atan2(2.3, -0.), math.pi/2)
        self.ftest('atan2(2.3, 0.)', math.atan2(2.3, 0.), math.pi/2)
        self.assertEqual(math.atan2(2.3, INF), 0.)
        self.assertTrue(math.isnan(math.atan2(2.3, NAN)))
        # math.atan2(-finite, x)
        self.ftest('atan2(-2.3, -inf)', math.atan2(-2.3, NINF), -math.pi)
        self.ftest('atan2(-2.3, -0.)', math.atan2(-2.3, -0.), -math.pi/2)
        self.ftest('atan2(-2.3, 0.)', math.atan2(-2.3, 0.), -math.pi/2)
        self.assertEqual(math.atan2(-2.3, INF), -0.)
        self.assertTrue(math.isnan(math.atan2(-2.3, NAN)))
        # math.atan2(NAN, x)
        self.assertTrue(math.isnan(math.atan2(NAN, NINF)))
        self.assertTrue(math.isnan(math.atan2(NAN, -2.3)))
        self.assertTrue(math.isnan(math.atan2(NAN, -0.)))
        self.assertTrue(math.isnan(math.atan2(NAN, 0.)))
        self.assertTrue(math.isnan(math.atan2(NAN, 2.3)))
        self.assertTrue(math.isnan(math.atan2(NAN, INF)))
        self.assertTrue(math.isnan(math.atan2(NAN, NAN)))
def testCbrt(self):
self.assertRaises(TypeError, math.cbrt)
self.ftest('cbrt(0)', math.cbrt(0), 0)
self.ftest('cbrt(1)', math.cbrt(1), 1)
self.ftest('cbrt(8)', math.cbrt(8), 2)
self.ftest('cbrt(0.0)', math.cbrt(0.0), 0.0)
self.ftest('cbrt(-0.0)', math.cbrt(-0.0), -0.0)
self.ftest('cbrt(1.2)', math.cbrt(1.2), 1.062658569182611)
self.ftest('cbrt(-2.6)', math.cbrt(-2.6), -1.375068867074141)
self.ftest('cbrt(27)', math.cbrt(27), 3)
self.ftest('cbrt(-1)', math.cbrt(-1), -1)
self.ftest('cbrt(-27)', math.cbrt(-27), -3)
self.assertEqual(math.cbrt(INF), INF)
self.assertEqual(math.cbrt(NINF), NINF)
self.assertTrue(math.isnan(math.cbrt(NAN)))
    def testCeil(self):
        """Check math.ceil: rounding direction, int return type, and the
        __ceil__ protocol (which must be looked up on the type, not the
        instance)."""
        self.assertRaises(TypeError, math.ceil)
        # ceil always returns an int in Python 3, never a float.
        self.assertEqual(int, type(math.ceil(0.5)))
        self.assertEqual(math.ceil(0.5), 1)
        self.assertEqual(math.ceil(1.0), 1)
        self.assertEqual(math.ceil(1.5), 2)
        # Negative values round toward positive infinity as well.
        self.assertEqual(math.ceil(-0.5), 0)
        self.assertEqual(math.ceil(-1.0), -1)
        self.assertEqual(math.ceil(-1.5), -1)
        self.assertEqual(math.ceil(0.0), 0)
        self.assertEqual(math.ceil(-0.0), 0)
        # Non-finite inputs deliberately not asserted here.
        #self.assertEqual(math.ceil(INF), INF)
        #self.assertEqual(math.ceil(NINF), NINF)
        #self.assertTrue(math.isnan(math.ceil(NAN)))
        # Objects supplying __ceil__ are delegated to, whether or not
        # they are also floats.
        class TestCeil:
            def __ceil__(self):
                return 42
        class FloatCeil(float):
            def __ceil__(self):
                return 42
        class TestNoCeil:
            pass
        self.assertEqual(math.ceil(TestCeil()), 42)
        self.assertEqual(math.ceil(FloatCeil()), 42)
        self.assertEqual(math.ceil(FloatLike(42.5)), 43)
        self.assertRaises(TypeError, math.ceil, TestNoCeil())
        # A __ceil__ attribute set on the instance (not the type) must be
        # ignored; the call still fails, as does passing extra arguments.
        t = TestNoCeil()
        t.__ceil__ = lambda *args: args
        self.assertRaises(TypeError, math.ceil, t)
        self.assertRaises(TypeError, math.ceil, t, 0)
    @requires_IEEE_754
    def testCopysign(self):
        """Check math.copysign, including sign transfer from signed zeros,
        infinities and NaNs (IEEE 754 semantics)."""
        self.assertEqual(math.copysign(1, 42), 1.0)
        self.assertEqual(math.copysign(0., 42), 0.0)
        self.assertEqual(math.copysign(1., -42), -1.0)
        self.assertEqual(math.copysign(3, 0.), 3.0)
        self.assertEqual(math.copysign(4., -0.), -4.0)
        self.assertRaises(TypeError, math.copysign)
        # copysign should let us distinguish signs of zeros
        self.assertEqual(math.copysign(1., 0.), 1.)
        self.assertEqual(math.copysign(1., -0.), -1.)
        self.assertEqual(math.copysign(INF, 0.), INF)
        self.assertEqual(math.copysign(INF, -0.), NINF)
        self.assertEqual(math.copysign(NINF, 0.), INF)
        self.assertEqual(math.copysign(NINF, -0.), NINF)
        # and of infinities
        self.assertEqual(math.copysign(1., INF), 1.)
        self.assertEqual(math.copysign(1., NINF), -1.)
        self.assertEqual(math.copysign(INF, INF), INF)
        self.assertEqual(math.copysign(INF, NINF), NINF)
        self.assertEqual(math.copysign(NINF, INF), INF)
        self.assertEqual(math.copysign(NINF, NINF), NINF)
        # A NaN magnitude stays NaN regardless of the sign source.
        self.assertTrue(math.isnan(math.copysign(NAN, 1.)))
        self.assertTrue(math.isnan(math.copysign(NAN, INF)))
        self.assertTrue(math.isnan(math.copysign(NAN, NINF)))
        self.assertTrue(math.isnan(math.copysign(NAN, NAN)))
        # copysign(INF, NAN) may be INF or it may be NINF, since
        # we don't know whether the sign bit of NAN is set on any
        # given platform.
        self.assertTrue(math.isinf(math.copysign(INF, NAN)))
        # similarly, copysign(2., NAN) could be 2. or -2.
        self.assertEqual(abs(math.copysign(2., NAN)), 2.)
    def testCos(self):
        """Check math.cos at the quarter-period points and on specials."""
        self.assertRaises(TypeError, math.cos)
        # cos(+/-pi/2) cannot be exactly 0 in binary floating point
        # (pi/2 is not representable), so allow one ulp of absolute error.
        self.ftest('cos(-pi/2)', math.cos(-math.pi/2), 0, abs_tol=math.ulp(1))
        self.ftest('cos(0)', math.cos(0), 1)
        self.ftest('cos(pi/2)', math.cos(math.pi/2), 0, abs_tol=math.ulp(1))
        self.ftest('cos(pi)', math.cos(math.pi), -1)
        # Platforms disagree on cos(+/-inf): some return NaN, others raise
        # ValueError.  Accept either, but require consistent behavior.
        try:
            self.assertTrue(math.isnan(math.cos(INF)))
            self.assertTrue(math.isnan(math.cos(NINF)))
        except ValueError:
            self.assertRaises(ValueError, math.cos, INF)
            self.assertRaises(ValueError, math.cos, NINF)
        self.assertTrue(math.isnan(math.cos(NAN)))
@unittest.skipIf(sys.platform == 'win32' and platform.machine() in ('ARM', 'ARM64'),
"Windows UCRT is off by 2 ULP this test requires accuracy within 1 ULP")
def testCosh(self):
self.assertRaises(TypeError, math.cosh)
self.ftest('cosh(0)', math.cosh(0), 1)
self.ftest('cosh(2)-2*cosh(1)**2', math.cosh(2)-2*math.cosh(1)**2, -1) # Thanks to Lambert
self.assertEqual(math.cosh(INF), INF)
self.assertEqual(math.cosh(NINF), INF)
self.assertTrue(math.isnan(math.cosh(NAN)))
def testDegrees(self):
self.assertRaises(TypeError, math.degrees)
self.ftest('degrees(pi)', math.degrees(math.pi), 180.0)
self.ftest('degrees(pi/2)', math.degrees(math.pi/2), 90.0)
self.ftest('degrees(-pi/4)', math.degrees(-math.pi/4), -45.0)
self.ftest('degrees(0)', math.degrees(0), 0)
def testExp(self):
self.assertRaises(TypeError, math.exp)
self.ftest('exp(-1)', math.exp(-1), 1/math.e)
self.ftest('exp(0)', math.exp(0), 1)
self.ftest('exp(1)', math.exp(1), math.e)
self.assertEqual(math.exp(INF), INF)
self.assertEqual(math.exp(NINF), 0.)
self.assertTrue(math.isnan(math.exp(NAN)))
self.assertRaises(OverflowError, math.exp, 1000000)
def testFabs(self):
self.assertRaises(TypeError, math.fabs)
self.ftest('fabs(-1)', math.fabs(-1), 1)
self.ftest('fabs(0)', math.fabs(0), 0)
self.ftest('fabs(1)', math.fabs(1), 1)
def testFactorial(self):
self.assertEqual(math.factorial(0), 1)
total = 1
for i in range(1, 1000):
total *= i
self.assertEqual(math.factorial(i), total)
self.assertEqual(math.factorial(i), py_factorial(i))
self.assertRaises(ValueError, math.factorial, -1)
self.assertRaises(ValueError, math.factorial, -10**100)
def testFactorialNonIntegers(self):
self.assertRaises(TypeError, math.factorial, 5.0)
self.assertRaises(TypeError, math.factorial, 5.2)
self.assertRaises(TypeError, math.factorial, -1.0)
self.assertRaises(TypeError, math.factorial, -1e100)
self.assertRaises(TypeError, math.factorial, decimal.Decimal('5'))
self.assertRaises(TypeError, math.factorial, decimal.Decimal('5.2'))
self.assertRaises(TypeError, math.factorial, "5")
    # Other implementations may place different upper bounds.
    @support.cpython_only
    def testFactorialHugeInputs(self):
        """Arguments too large for a C long are rejected (CPython detail)."""
        # Currently raises OverflowError for inputs that are too large
        # to fit into a C long.
        self.assertRaises(OverflowError, math.factorial, 10**100)
        # An equally huge float is a TypeError, like any float.
        self.assertRaises(TypeError, math.factorial, 1e100)
    def testFloor(self):
        """Check math.floor: rounding direction, int return type, and the
        __floor__ protocol (which must be looked up on the type, not the
        instance)."""
        self.assertRaises(TypeError, math.floor)
        # floor always returns an int in Python 3, never a float.
        self.assertEqual(int, type(math.floor(0.5)))
        self.assertEqual(math.floor(0.5), 0)
        self.assertEqual(math.floor(1.0), 1)
        self.assertEqual(math.floor(1.5), 1)
        # Negative values round toward negative infinity as well.
        self.assertEqual(math.floor(-0.5), -1)
        self.assertEqual(math.floor(-1.0), -1)
        self.assertEqual(math.floor(-1.5), -2)
        # NOTE(review): these disabled lines previously said math.ceil —
        # a copy-paste slip from testCeil; corrected to math.floor while
        # keeping them disabled, to match the function under test here.
        #self.assertEqual(math.floor(INF), INF)
        #self.assertEqual(math.floor(NINF), NINF)
        #self.assertTrue(math.isnan(math.floor(NAN)))
        # Objects supplying __floor__ are delegated to, whether or not
        # they are also floats.
        class TestFloor:
            def __floor__(self):
                return 42
        class FloatFloor(float):
            def __floor__(self):
                return 42
        class TestNoFloor:
            pass
        self.assertEqual(math.floor(TestFloor()), 42)
        self.assertEqual(math.floor(FloatFloor()), 42)
        self.assertEqual(math.floor(FloatLike(41.9)), 41)
        self.assertRaises(TypeError, math.floor, TestNoFloor())
        # A __floor__ attribute set on the instance (not the type) must be
        # ignored; the call still fails, as does passing extra arguments.
        t = TestNoFloor()
        t.__floor__ = lambda *args: args
        self.assertRaises(TypeError, math.floor, t)
        self.assertRaises(TypeError, math.floor, t, 0)
    def testFmod(self):
        """Check math.fmod: basic remainders, sign of the result, NaN
        propagation, and the infinity/zero special cases."""
        self.assertRaises(TypeError, math.fmod)
        # The result carries the sign of the first argument.
        self.ftest('fmod(10, 1)', math.fmod(10, 1), 0.0)
        self.ftest('fmod(10, 0.5)', math.fmod(10, 0.5), 0.0)
        self.ftest('fmod(10, 1.5)', math.fmod(10, 1.5), 1.0)
        self.ftest('fmod(-10, 1)', math.fmod(-10, 1), -0.0)
        self.ftest('fmod(-10, 0.5)', math.fmod(-10, 0.5), -0.0)
        self.ftest('fmod(-10, 1.5)', math.fmod(-10, 1.5), -1.0)
        # Any NaN operand yields NaN.
        self.assertTrue(math.isnan(math.fmod(NAN, 1.)))
        self.assertTrue(math.isnan(math.fmod(1., NAN)))
        self.assertTrue(math.isnan(math.fmod(NAN, NAN)))
        # A zero divisor or an infinite dividend is a domain error.
        self.assertRaises(ValueError, math.fmod, 1., 0.)
        self.assertRaises(ValueError, math.fmod, INF, 1.)
        self.assertRaises(ValueError, math.fmod, NINF, 1.)
        self.assertRaises(ValueError, math.fmod, INF, 0.)
        # A finite dividend with an infinite divisor is returned unchanged.
        self.assertEqual(math.fmod(3.0, INF), 3.0)
        self.assertEqual(math.fmod(-3.0, INF), -3.0)
        self.assertEqual(math.fmod(3.0, NINF), 3.0)
        self.assertEqual(math.fmod(-3.0, NINF), -3.0)
        self.assertEqual(math.fmod(0.0, 3.0), 0.0)
        self.assertEqual(math.fmod(0.0, NINF), 0.0)
    def testFrexp(self):
        """Check math.frexp: (mantissa, exponent) pairs and special values."""
        self.assertRaises(TypeError, math.frexp)
        # Helper: compare (mantissa, exponent) pairs — the mantissa to
        # within eps, the (integer) exponent exactly.
        def testfrexp(name, result, expected):
            (mant, exp), (emant, eexp) = result, expected
            if abs(mant-emant) > eps or exp != eexp:
                self.fail('%s returned %r, expected %r'%\
                          (name, result, expected))
        testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
        testfrexp('frexp(0)', math.frexp(0), (0, 0))
        testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
        testfrexp('frexp(2)', math.frexp(2), (0.5, 2))
        # Non-finite inputs come back unchanged in the mantissa slot.
        self.assertEqual(math.frexp(INF)[0], INF)
        self.assertEqual(math.frexp(NINF)[0], NINF)
        self.assertTrue(math.isnan(math.frexp(NAN)[0]))
    @requires_IEEE_754
    @unittest.skipIf(HAVE_DOUBLE_ROUNDING,
                     "fsum is not exact on machines with double rounding")
    def testFsum(self):
        """Check math.fsum against a pure-Python exact-summation reference
        on hand-picked exact cases and on randomized shuffled data."""
        # math.fsum relies on exact rounding for correct operation.
        # There's a known problem with IA32 floating-point that causes
        # inexact rounding in some situations, and will cause the
        # math.fsum tests below to fail; see issue #2937. On non IEEE
        # 754 platforms, and on IEEE 754 platforms that exhibit the
        # problem described in issue #2937, we simply skip the whole
        # test.
        # Python version of math.fsum, for comparison. Uses a
        # different algorithm based on frexp, ldexp and integer
        # arithmetic.
        from sys import float_info
        mant_dig = float_info.mant_dig
        etiny = float_info.min_exp - mant_dig
        def msum(iterable):
            """Full precision summation. Compute sum(iterable) without any
            intermediate accumulation of error. Based on the 'lsum' function
            at http://code.activestate.com/recipes/393090/
            """
            # Accumulate the exact sum as an arbitrary-precision integer
            # mantissa (tmant) scaled by 2**texp.
            tmant, texp = 0, 0
            for x in iterable:
                mant, exp = math.frexp(x)
                mant, exp = int(math.ldexp(mant, mant_dig)), exp - mant_dig
                if texp > exp:
                    tmant <<= texp-exp
                    texp = exp
                else:
                    mant <<= exp-texp
                tmant += mant
            # Round tmant * 2**texp to a float. The original recipe
            # used float(str(tmant)) * 2.0**texp for this, but that's
            # a little unsafe because str -> float conversion can't be
            # relied upon to do correct rounding on all platforms.
            tail = max(len(bin(abs(tmant)))-2 - mant_dig, etiny - texp)
            if tail > 0:
                # Round-half-to-even on the bits being discarded.
                h = 1 << (tail-1)
                tmant = tmant // (2*h) + bool(tmant & h and tmant & 3*h-1)
                texp += tail
            return math.ldexp(tmant, texp)
        # (input list, exactly-rounded expected sum) pairs.
        test_values = [
            ([], 0.0),
            ([0.0], 0.0),
            ([1e100, 1.0, -1e100, 1e-100, 1e50, -1.0, -1e50], 1e-100),
            ([2.0**53, -0.5, -2.0**-54], 2.0**53-1.0),
            ([2.0**53, 1.0, 2.0**-100], 2.0**53+2.0),
            ([2.0**53+10.0, 1.0, 2.0**-100], 2.0**53+12.0),
            ([2.0**53-4.0, 0.5, 2.0**-54], 2.0**53-3.0),
            ([1./n for n in range(1, 1001)],
             float.fromhex('0x1.df11f45f4e61ap+2')),
            ([(-1.)**n/n for n in range(1, 1001)],
             float.fromhex('-0x1.62a2af1bd3624p-1')),
            ([1e16, 1., 1e-16], 10000000000000002.0),
            ([1e16-2., 1.-2.**-53, -(1e16-2.), -(1.-2.**-53)], 0.0),
            # exercise code for resizing partials array
            ([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] +
             [-2.**1022],
             float.fromhex('0x1.5555555555555p+970')),
            ]
        # Telescoping sum, with exact differences (due to Sterbenz)
        terms = [1.7**i for i in range(1001)]
        test_values.append((
            [terms[i+1] - terms[i] for i in range(1000)] + [-terms[1000]],
            -terms[0]
        ))
        for i, (vals, expected) in enumerate(test_values):
            try:
                actual = math.fsum(vals)
            except OverflowError:
                self.fail("test %d failed: got OverflowError, expected %r "
                          "for math.fsum(%.100r)" % (i, expected, vals))
            except ValueError:
                self.fail("test %d failed: got ValueError, expected %r "
                          "for math.fsum(%.100r)" % (i, expected, vals))
            self.assertEqual(actual, expected)
        # Randomized cross-check against the reference implementation.
        from random import random, gauss, shuffle
        for j in range(1000):
            vals = [7, 1e100, -7, -1e100, -9e-20, 8e-20] * 10
            s = 0
            for i in range(200):
                v = gauss(0, random()) ** 7 - s
                s += v
                vals.append(v)
            shuffle(vals)
            s = msum(vals)
            self.assertEqual(msum(vals), math.fsum(vals))
    def testGcd(self):
        """Check math.gcd: zero/unit cases, sign handling, large operands,
        variadic forms, and rejection of floats."""
        gcd = math.gcd
        # gcd with zero and with units; the result is always non-negative.
        self.assertEqual(gcd(0, 0), 0)
        self.assertEqual(gcd(1, 0), 1)
        self.assertEqual(gcd(-1, 0), 1)
        self.assertEqual(gcd(0, 1), 1)
        self.assertEqual(gcd(0, -1), 1)
        self.assertEqual(gcd(7, 1), 1)
        self.assertEqual(gcd(7, -1), 1)
        self.assertEqual(gcd(-23, 15), 1)
        self.assertEqual(gcd(120, 84), 12)
        self.assertEqual(gcd(84, -120), 12)
        self.assertEqual(gcd(1216342683557601535506311712,
                             436522681849110124616458784), 32)
        # x and y are chosen so that gcd(x*c, y*c) == c (i.e. they share
        # no common factor with each other), for both a small and a huge c.
        x = 434610456570399902378880679233098819019853229470286994367836600566
        y = 1064502245825115327754847244914921553977
        for c in (652560,
                  576559230871654959816130551884856912003141446781646602790216406874):
            a = x * c
            b = y * c
            # All sign combinations give the same (positive) gcd.
            self.assertEqual(gcd(a, b), c)
            self.assertEqual(gcd(b, a), c)
            self.assertEqual(gcd(-a, b), c)
            self.assertEqual(gcd(b, -a), c)
            self.assertEqual(gcd(a, -b), c)
            self.assertEqual(gcd(-b, a), c)
            self.assertEqual(gcd(-a, -b), c)
            self.assertEqual(gcd(-b, -a), c)
        # Variadic forms: zero, one, or more than two arguments.
        self.assertEqual(gcd(), 0)
        self.assertEqual(gcd(120), 120)
        self.assertEqual(gcd(-120), 120)
        self.assertEqual(gcd(120, 84, 102), 6)
        self.assertEqual(gcd(120, 1, 84), 1)
        # Floats are rejected in any position...
        self.assertRaises(TypeError, gcd, 120.0)
        self.assertRaises(TypeError, gcd, 120.0, 84)
        self.assertRaises(TypeError, gcd, 120, 84.0)
        self.assertRaises(TypeError, gcd, 120, 1, 84.0)
        # ...but objects implementing __index__ are accepted.
        self.assertEqual(gcd(MyIndexable(120), MyIndexable(84)), 12)
    def testHypot(self):
        """Check the n-ary math.hypot: agreement with a naive sqrt-of-squares
        reference, accepted argument types, corner cases, error handling,
        special values, and scaling near the float range limits."""
        from decimal import Decimal
        from fractions import Fraction
        hypot = math.hypot
        # Test different numbers of arguments (from zero to five)
        # against a straightforward pure python implementation
        args = math.e, math.pi, math.sqrt(2.0), math.gamma(3.5), math.sin(2.1)
        for i in range(len(args)+1):
            self.assertAlmostEqual(
                hypot(*args[:i]),
                math.sqrt(sum(s**2 for s in args[:i]))
            )
        # Test allowable types (those with __float__)
        self.assertEqual(hypot(12.0, 5.0), 13.0)
        self.assertEqual(hypot(12, 5), 13)
        self.assertEqual(hypot(Decimal(12), Decimal(5)), 13)
        self.assertEqual(hypot(Fraction(12, 32), Fraction(5, 32)), Fraction(13, 32))
        self.assertEqual(hypot(bool(1), bool(0), bool(1), bool(1)), math.sqrt(3))
        # Test corner cases
        self.assertEqual(hypot(0.0, 0.0), 0.0)       # Max input is zero
        self.assertEqual(hypot(-10.5), 10.5)         # Negative input
        self.assertEqual(hypot(), 0.0)               # Zero-argument case
        self.assertEqual(1.0,
            math.copysign(1.0, hypot(-0.0))          # Convert negative zero to positive zero
        )
        self.assertEqual(                            # Handling of moving max to the end
            hypot(1.5, 1.5, 0.5),
            hypot(1.5, 0.5, 1.5),
        )
        # Test handling of bad arguments
        with self.assertRaises(TypeError):           # Reject keyword args
            hypot(x=1)
        with self.assertRaises(TypeError):           # Reject values without __float__
            hypot(1.1, 'string', 2.2)
        int_too_big_for_float = 10 ** (sys.float_info.max_10_exp + 5)
        with self.assertRaises((ValueError, OverflowError)):
            hypot(1, int_too_big_for_float)
        # Any infinity gives positive infinity.
        self.assertEqual(hypot(INF), INF)
        self.assertEqual(hypot(0, INF), INF)
        self.assertEqual(hypot(10, INF), INF)
        self.assertEqual(hypot(-10, INF), INF)
        self.assertEqual(hypot(NAN, INF), INF)
        self.assertEqual(hypot(INF, NAN), INF)
        self.assertEqual(hypot(NINF, NAN), INF)
        self.assertEqual(hypot(NAN, NINF), INF)
        self.assertEqual(hypot(-INF, INF), INF)
        self.assertEqual(hypot(-INF, -INF), INF)
        self.assertEqual(hypot(10, -INF), INF)
        # If no infinity, any NaN gives a NaN.
        self.assertTrue(math.isnan(hypot(NAN)))
        self.assertTrue(math.isnan(hypot(0, NAN)))
        self.assertTrue(math.isnan(hypot(NAN, 10)))
        self.assertTrue(math.isnan(hypot(10, NAN)))
        self.assertTrue(math.isnan(hypot(NAN, NAN)))
        self.assertTrue(math.isnan(hypot(NAN)))
        # Verify scaling for extremely large values
        fourthmax = FLOAT_MAX / 4.0
        for n in range(32):
            self.assertTrue(math.isclose(hypot(*([fourthmax]*n)),
                                         fourthmax * math.sqrt(n)))
        # Verify scaling for extremely small values
        for exp in range(32):
            scale = FLOAT_MIN / 2.0 ** exp
            self.assertEqual(math.hypot(4*scale, 3*scale), 5*scale)
    @requires_IEEE_754
    @unittest.skipIf(HAVE_DOUBLE_ROUNDING,
                     "hypot() loses accuracy on machines with double rounding")
    def testHypotAccuracy(self):
        """Check hypot against a 500-digit Decimal reference on inputs that
        historically produced 1-2 ulp errors."""
        # Verify improved accuracy in cases that were known to be inaccurate.
        #
        # The new algorithm's accuracy depends on IEEE 754 arithmetic
        # guarantees, on having the usual ROUND HALF EVEN rounding mode, on
        # the system not having double rounding due to extended precision,
        # and on the compiler maintaining the specified order of operations.
        #
        # This test is known to succeed on most of our builds. If it fails
        # some build, we either need to add another skipIf if the cause is
        # identifiable; otherwise, we can remove this test entirely.
        hypot = math.hypot
        Decimal = decimal.Decimal
        high_precision = decimal.Context(prec=500)
        # (x, y) pairs in float hex notation for exact reproducibility.
        for hx, hy in [
            # Cases with a 1 ulp error in Python 3.7 compiled with Clang
            ('0x1.10e89518dca48p+29', '0x1.1970f7565b7efp+30'),
            ('0x1.10106eb4b44a2p+29', '0x1.ef0596cdc97f8p+29'),
            ('0x1.459c058e20bb7p+30', '0x1.993ca009b9178p+29'),
            ('0x1.378371ae67c0cp+30', '0x1.fbe6619854b4cp+29'),
            ('0x1.f4cd0574fb97ap+29', '0x1.50fe31669340ep+30'),
            ('0x1.494b2cdd3d446p+29', '0x1.212a5367b4c7cp+29'),
            ('0x1.f84e649f1e46dp+29', '0x1.1fa56bef8eec4p+30'),
            ('0x1.2e817edd3d6fap+30', '0x1.eb0814f1e9602p+29'),
            ('0x1.0d3a6e3d04245p+29', '0x1.32a62fea52352p+30'),
            ('0x1.888e19611bfc5p+29', '0x1.52b8e70b24353p+29'),

            # Cases with 2 ulp error in Python 3.8
            ('0x1.538816d48a13fp+29', '0x1.7967c5ca43e16p+29'),
            ('0x1.57b47b7234530p+29', '0x1.74e2c7040e772p+29'),
            ('0x1.821b685e9b168p+30', '0x1.677dc1c1e3dc6p+29'),
            ('0x1.9e8247f67097bp+29', '0x1.24bd2dc4f4baep+29'),
            ('0x1.b73b59e0cb5f9p+29', '0x1.da899ab784a97p+28'),
            ('0x1.94a8d2842a7cfp+30', '0x1.326a51d4d8d8ap+30'),
            ('0x1.e930b9cd99035p+29', '0x1.5a1030e18dff9p+30'),
            ('0x1.1592bbb0e4690p+29', '0x1.a9c337b33fb9ap+29'),
            ('0x1.1243a50751fd4p+29', '0x1.a5a10175622d9p+29'),
            ('0x1.57a8596e74722p+30', '0x1.42d1af9d04da9p+30'),

            # Cases with 1 ulp error in version fff3c28052e6b0
            ('0x1.ee7dbd9565899p+29', '0x1.7ab4d6fc6e4b4p+29'),
            ('0x1.5c6bfbec5c4dcp+30', '0x1.02511184b4970p+30'),
            ('0x1.59dcebba995cap+30', '0x1.50ca7e7c38854p+29'),
            ('0x1.768cdd94cf5aap+29', '0x1.9cfdc5571d38ep+29'),
            ('0x1.dcf137d60262ep+29', '0x1.1101621990b3ep+30'),
            ('0x1.3a2d006e288b0p+30', '0x1.e9a240914326cp+29'),
            ('0x1.62a32f7f53c61p+29', '0x1.47eb6cd72684fp+29'),
            ('0x1.d3bcb60748ef2p+29', '0x1.3f13c4056312cp+30'),
            ('0x1.282bdb82f17f3p+30', '0x1.640ba4c4eed3ap+30'),
            ('0x1.89d8c423ea0c6p+29', '0x1.d35dcfe902bc3p+29'),
        ]:
            x = float.fromhex(hx)
            y = float.fromhex(hy)
            with self.subTest(hx=hx, hy=hy, x=x, y=y):
                with decimal.localcontext(high_precision):
                    # Correctly-rounded reference value for sqrt(x*x + y*y).
                    z = float((Decimal(x)**2 + Decimal(y)**2).sqrt())
                self.assertEqual(hypot(x, y), z)
    def testDist(self):
        """Check math.dist: exact cases, agreement with a naive reference,
        accepted input/point types, argument validation, special values,
        and scaling near the float range limits."""
        from decimal import Decimal as D
        from fractions import Fraction as F
        dist = math.dist
        sqrt = math.sqrt
        # Simple exact cases
        self.assertEqual(dist((1.0, 2.0, 3.0), (4.0, 2.0, -1.0)), 5.0)
        self.assertEqual(dist((1, 2, 3), (4, 2, -1)), 5.0)
        # Test different numbers of arguments (from zero to nine)
        # against a straightforward pure python implementation
        for i in range(9):
            for j in range(5):
                p = tuple(random.uniform(-5, 5) for k in range(i))
                q = tuple(random.uniform(-5, 5) for k in range(i))
                self.assertAlmostEqual(
                    dist(p, q),
                    sqrt(sum((px - qx) ** 2.0 for px, qx in zip(p, q)))
                )
        # Test non-tuple inputs
        self.assertEqual(dist([1.0, 2.0, 3.0], [4.0, 2.0, -1.0]), 5.0)
        self.assertEqual(dist(iter([1.0, 2.0, 3.0]), iter([4.0, 2.0, -1.0])), 5.0)
        # Test allowable types (those with __float__)
        self.assertEqual(dist((14.0, 1.0), (2.0, -4.0)), 13.0)
        self.assertEqual(dist((14, 1), (2, -4)), 13)
        self.assertEqual(dist((D(14), D(1)), (D(2), D(-4))), D(13))
        self.assertEqual(dist((F(14, 32), F(1, 32)), (F(2, 32), F(-4, 32))),
                         F(13, 32))
        self.assertEqual(dist((True, True, False, True, False),
                              (True, False, True, True, False)),
                         sqrt(2.0))
        # Test corner cases
        self.assertEqual(dist((13.25, 12.5, -3.25),
                              (13.25, 12.5, -3.25)),
                         0.0)                      # Distance with self is zero
        self.assertEqual(dist((), ()), 0.0)        # Zero-dimensional case
        self.assertEqual(1.0,                      # Convert negative zero to positive zero
            math.copysign(1.0, dist((-0.0,), (0.0,)))
        )
        self.assertEqual(1.0,                      # Convert negative zero to positive zero
            math.copysign(1.0, dist((0.0,), (-0.0,)))
        )
        self.assertEqual(                          # Handling of moving max to the end
            dist((1.5, 1.5, 0.5), (0, 0, 0)),
            dist((1.5, 0.5, 1.5), (0, 0, 0))
        )
        # Verify tuple subclasses are allowed
        class T(tuple):
            pass
        self.assertEqual(dist(T((1, 2, 3)), ((4, 2, -1))), 5.0)
        # Test handling of bad arguments
        with self.assertRaises(TypeError):         # Reject keyword args
            dist(p=(1, 2, 3), q=(4, 5, 6))
        with self.assertRaises(TypeError):         # Too few args
            dist((1, 2, 3))
        with self.assertRaises(TypeError):         # Too many args
            dist((1, 2, 3), (4, 5, 6), (7, 8, 9))
        with self.assertRaises(TypeError):         # Scalars not allowed
            dist(1, 2)
        with self.assertRaises(TypeError):         # Reject values without __float__
            dist((1.1, 'string', 2.2), (1, 2, 3))
        with self.assertRaises(ValueError):        # Check dimension agree
            dist((1, 2, 3, 4), (5, 6, 7))
        with self.assertRaises(ValueError):        # Check dimension agree
            dist((1, 2, 3), (4, 5, 6, 7))
        with self.assertRaises(TypeError):         # Rejects invalid types
            dist("abc", "xyz")
        int_too_big_for_float = 10 ** (sys.float_info.max_10_exp + 5)
        with self.assertRaises((ValueError, OverflowError)):
            dist((1, int_too_big_for_float), (2, 3))
        with self.assertRaises((ValueError, OverflowError)):
            dist((2, 3), (1, int_too_big_for_float))
        # Verify that the one dimensional case is equivalent to abs()
        for i in range(20):
            p, q = random.random(), random.random()
            self.assertEqual(dist((p,), (q,)), abs(p - q))
        # Test special values
        values = [NINF, -10.5, -0.0, 0.0, 10.5, INF, NAN]
        for p in itertools.product(values, repeat=3):
            for q in itertools.product(values, repeat=3):
                diffs = [px - qx for px, qx in zip(p, q)]
                if any(map(math.isinf, diffs)):
                    # Any infinite difference gives positive infinity.
                    self.assertEqual(dist(p, q), INF)
                elif any(map(math.isnan, diffs)):
                    # If no infinity, any NaN gives a NaN.
                    self.assertTrue(math.isnan(dist(p, q)))
        # Verify scaling for extremely large values
        fourthmax = FLOAT_MAX / 4.0
        for n in range(32):
            p = (fourthmax,) * n
            q = (0.0,) * n
            self.assertTrue(math.isclose(dist(p, q), fourthmax * math.sqrt(n)))
            self.assertTrue(math.isclose(dist(q, p), fourthmax * math.sqrt(n)))
        # Verify scaling for extremely small values
        for exp in range(32):
            scale = FLOAT_MIN / 2.0 ** exp
            p = (4*scale, 3*scale)
            q = (0.0, 0.0)
            self.assertEqual(math.dist(p, q), 5*scale)
            self.assertEqual(math.dist(q, p), 5*scale)
    def testIsqrt(self):
        """Check math.isqrt: defining property s*s <= n < (s+1)*(s+1) over a
        wide range of inputs, negative rejection, and __index__ support."""
        # Test a variety of inputs, large and small.
        test_values = (
            list(range(1000))
            + list(range(10**6 - 1000, 10**6 + 1000))
            + [2**e + i for e in range(60, 200) for i in range(-40, 40)]
            + [3**9999, 10**5001]
        )
        for value in test_values:
            with self.subTest(value=value):
                s = math.isqrt(value)
                # The result must be the exact integer floor square root.
                self.assertIs(type(s), int)
                self.assertLessEqual(s*s, value)
                self.assertLess(value, (s+1)*(s+1))
        # Negative values
        with self.assertRaises(ValueError):
            math.isqrt(-1)
        # Integer-like things
        s = math.isqrt(True)
        self.assertIs(type(s), int)
        self.assertEqual(s, 1)
        s = math.isqrt(False)
        self.assertIs(type(s), int)
        self.assertEqual(s, 0)
        # Objects supplying __index__ are converted before taking the root.
        class IntegerLike(object):
            def __init__(self, value):
                self.value = value
            def __index__(self):
                return self.value
        s = math.isqrt(IntegerLike(1729))
        self.assertIs(type(s), int)
        self.assertEqual(s, 41)
        # Negative __index__ results are rejected the same as plain ints.
        with self.assertRaises(ValueError):
            math.isqrt(IntegerLike(-3))
        # Non-integer-like things
        bad_values = [
            3.5, "a string", decimal.Decimal("3.5"), 3.5j,
            100.0, -4.0,
        ]
        for value in bad_values:
            with self.subTest(value=value):
                with self.assertRaises(TypeError):
                    math.isqrt(value)
    def test_lcm(self):
        """Check math.lcm: zero/unit cases, sign handling, large operands,
        variadic forms, and rejection of floats."""
        lcm = math.lcm
        # lcm with zero is zero; the result is always non-negative.
        self.assertEqual(lcm(0, 0), 0)
        self.assertEqual(lcm(1, 0), 0)
        self.assertEqual(lcm(-1, 0), 0)
        self.assertEqual(lcm(0, 1), 0)
        self.assertEqual(lcm(0, -1), 0)
        self.assertEqual(lcm(7, 1), 7)
        self.assertEqual(lcm(7, -1), 7)
        self.assertEqual(lcm(-23, 15), 345)
        self.assertEqual(lcm(120, 84), 840)
        self.assertEqual(lcm(84, -120), 840)
        self.assertEqual(lcm(1216342683557601535506311712,
                             436522681849110124616458784),
                         16592536571065866494401400422922201534178938447014944)
        # x and y are chosen so that lcm(x*c, y*c) == x*y*c (i.e. they
        # share no common factor), for both a small and a large c.
        x = 43461045657039990237
        y = 10645022458251153277
        for c in (652560,
                  57655923087165495981):
            a = x * c
            b = y * c
            d = x * y * c
            # All sign combinations give the same (positive) lcm.
            self.assertEqual(lcm(a, b), d)
            self.assertEqual(lcm(b, a), d)
            self.assertEqual(lcm(-a, b), d)
            self.assertEqual(lcm(b, -a), d)
            self.assertEqual(lcm(a, -b), d)
            self.assertEqual(lcm(-b, a), d)
            self.assertEqual(lcm(-a, -b), d)
            self.assertEqual(lcm(-b, -a), d)
        # Variadic forms: zero, one, or more than two arguments.
        self.assertEqual(lcm(), 1)
        self.assertEqual(lcm(120), 120)
        self.assertEqual(lcm(-120), 120)
        self.assertEqual(lcm(120, 84, 102), 14280)
        self.assertEqual(lcm(120, 0, 84), 0)
        # Floats are rejected in any position...
        self.assertRaises(TypeError, lcm, 120.0)
        self.assertRaises(TypeError, lcm, 120.0, 84)
        self.assertRaises(TypeError, lcm, 120, 84.0)
        self.assertRaises(TypeError, lcm, 120, 0, 84.0)
        # ...but objects implementing __index__ are accepted.
        self.assertEqual(lcm(MyIndexable(120), MyIndexable(84)), 840)
    def testLdexp(self):
        """Check math.ldexp: simple scalings, overflow/underflow behavior,
        specials, and exponents too large to fit in a C int."""
        self.assertRaises(TypeError, math.ldexp)
        self.ftest('ldexp(0,1)', math.ldexp(0,1), 0)
        self.ftest('ldexp(1,1)', math.ldexp(1,1), 2)
        self.ftest('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
        self.ftest('ldexp(-1,1)', math.ldexp(-1,1), -2)
        # Huge exponents overflow; huge negative exponents underflow to a
        # signed zero.
        self.assertRaises(OverflowError, math.ldexp, 1., 1000000)
        self.assertRaises(OverflowError, math.ldexp, -1., 1000000)
        self.assertEqual(math.ldexp(1., -1000000), 0.)
        self.assertEqual(math.ldexp(-1., -1000000), -0.)
        # Non-finite mantissas pass through unchanged.
        self.assertEqual(math.ldexp(INF, 30), INF)
        self.assertEqual(math.ldexp(NINF, -213), NINF)
        self.assertTrue(math.isnan(math.ldexp(NAN, 0)))
        # large second argument
        for n in [10**5, 10**10, 10**20, 10**40]:
            self.assertEqual(math.ldexp(INF, -n), INF)
            self.assertEqual(math.ldexp(NINF, -n), NINF)
            self.assertEqual(math.ldexp(1., -n), 0.)
            self.assertEqual(math.ldexp(-1., -n), -0.)
            self.assertEqual(math.ldexp(0., -n), 0.)
            self.assertEqual(math.ldexp(-0., -n), -0.)
            self.assertTrue(math.isnan(math.ldexp(NAN, -n)))
            self.assertRaises(OverflowError, math.ldexp, 1., n)
            self.assertRaises(OverflowError, math.ldexp, -1., n)
            # Zeros and non-finite values never overflow, whatever the
            # exponent.
            self.assertEqual(math.ldexp(0., n), 0.)
            self.assertEqual(math.ldexp(-0., n), -0.)
            self.assertEqual(math.ldexp(INF, n), INF)
            self.assertEqual(math.ldexp(NINF, n), NINF)
            self.assertTrue(math.isnan(math.ldexp(NAN, n)))
    def testLog(self):
        """Check math.log: natural log, explicit bases, huge integer
        arguments, domain errors, and specials."""
        self.assertRaises(TypeError, math.log)
        self.ftest('log(1/e)', math.log(1/math.e), -1)
        self.ftest('log(1)', math.log(1), 0)
        self.ftest('log(e)', math.log(math.e), 1)
        # The optional second argument selects the base.
        self.ftest('log(32,2)', math.log(32,2), 5)
        self.ftest('log(10**40, 10)', math.log(10**40, 10), 40)
        self.ftest('log(10**40, 10**20)', math.log(10**40, 10**20), 2)
        # Integer arguments too large for a float are still handled.
        self.ftest('log(10**1000)', math.log(10**1000),
                   2302.5850929940457)
        # Non-positive arguments are domain errors.
        self.assertRaises(ValueError, math.log, -1.5)
        self.assertRaises(ValueError, math.log, -10**1000)
        self.assertRaises(ValueError, math.log, NINF)
        self.assertEqual(math.log(INF), INF)
        self.assertTrue(math.isnan(math.log(NAN)))
def testLog1p(self):
self.assertRaises(TypeError, math.log1p)
for n in [2, 2**90, 2**300]:
self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
self.assertRaises(ValueError, math.log1p, -1)
self.assertEqual(math.log1p(INF), INF)
@requires_IEEE_754
def testLog2(self):
self.assertRaises(TypeError, math.log2)
# Check some integer values
self.assertEqual(math.log2(1), 0.0)
self.assertEqual(math.log2(2), 1.0)
self.assertEqual(math.log2(4), 2.0)
# Large integer values
self.assertEqual(math.log2(2**1023), 1023.0)
self.assertEqual(math.log2(2**1024), 1024.0)
self.assertEqual(math.log2(2**2000), 2000.0)
self.assertRaises(ValueError, math.log2, -1.5)
self.assertRaises(ValueError, math.log2, NINF)
self.assertTrue(math.isnan(math.log2(NAN)))
@requires_IEEE_754
# log2() is not accurate enough on Mac OS X Tiger (10.4)
@support.requires_mac_ver(10, 5)
def testLog2Exact(self):
# Check that we get exact equality for log2 of powers of 2.
actual = [math.log2(math.ldexp(1.0, n)) for n in range(-1074, 1024)]
expected = [float(n) for n in range(-1074, 1024)]
self.assertEqual(actual, expected)
def testLog10(self):
self.assertRaises(TypeError, math.log10)
self.ftest('log10(0.1)', math.log10(0.1), -1)
self.ftest('log10(1)', math.log10(1), 0)
self.ftest('log10(10)', math.log10(10), 1)
self.ftest('log10(10**1000)', math.log10(10**1000), 1000.0)
self.assertRaises(ValueError, math.log10, -1.5)
self.assertRaises(ValueError, math.log10, -10**1000)
self.assertRaises(ValueError, math.log10, NINF)
self.assertEqual(math.log(INF), INF)
self.assertTrue(math.isnan(math.log10(NAN)))
    def testModf(self):
        """Check math.modf: (fractional, integral) pairs and specials."""
        self.assertRaises(TypeError, math.modf)
        # Helper: compare (fractional, integral) pairs — the fractional
        # part to within eps; the integral part must match exactly (any
        # nonzero difference is truthy and fails), since modf's integral
        # part is an exact float.
        def testmodf(name, result, expected):
            (v1, v2), (e1, e2) = result, expected
            if abs(v1-e1) > eps or abs(v2-e2):
                self.fail('%s returned %r, expected %r'%\
                          (name, result, expected))
        testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0))
        testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0))
        # Infinities put a signed zero in the fractional slot; NaN fills
        # both slots.
        self.assertEqual(math.modf(INF), (0.0, INF))
        self.assertEqual(math.modf(NINF), (-0.0, NINF))
        modf_nan = math.modf(NAN)
        self.assertTrue(math.isnan(modf_nan[0]))
        self.assertTrue(math.isnan(modf_nan[1]))
    def testPow(self):
        """Check math.pow over the full IEEE 754 special-value matrix:
        zeros, infinities, NaNs, +/-1, and negative bases with integral
        and non-integral exponents."""
        self.assertRaises(TypeError, math.pow)
        self.ftest('pow(0,1)', math.pow(0,1), 0)
        self.ftest('pow(1,0)', math.pow(1,0), 1)
        self.ftest('pow(2,1)', math.pow(2,1), 2)
        self.ftest('pow(2,-1)', math.pow(2,-1), 0.5)
        self.assertEqual(math.pow(INF, 1), INF)
        self.assertEqual(math.pow(NINF, 1), NINF)
        # 1**anything is 1, even for inf/NaN exponents (IEEE 754 pow).
        self.assertEqual((math.pow(1, INF)), 1.)
        self.assertEqual((math.pow(1, NINF)), 1.)
        self.assertTrue(math.isnan(math.pow(NAN, 1)))
        self.assertTrue(math.isnan(math.pow(2, NAN)))
        self.assertTrue(math.isnan(math.pow(0, NAN)))
        self.assertEqual(math.pow(1, NAN), 1)
        # pow(0., x)
        self.assertEqual(math.pow(0., INF), 0.)
        self.assertEqual(math.pow(0., 3.), 0.)
        self.assertEqual(math.pow(0., 2.3), 0.)
        self.assertEqual(math.pow(0., 2.), 0.)
        self.assertEqual(math.pow(0., 0.), 1.)
        self.assertEqual(math.pow(0., -0.), 1.)
        # Zero to a negative power divides by zero.
        self.assertRaises(ValueError, math.pow, 0., -2.)
        self.assertRaises(ValueError, math.pow, 0., -2.3)
        self.assertRaises(ValueError, math.pow, 0., -3.)
        self.assertEqual(math.pow(0., NINF), INF)
        self.assertTrue(math.isnan(math.pow(0., NAN)))
        # pow(INF, x)
        self.assertEqual(math.pow(INF, INF), INF)
        self.assertEqual(math.pow(INF, 3.), INF)
        self.assertEqual(math.pow(INF, 2.3), INF)
        self.assertEqual(math.pow(INF, 2.), INF)
        self.assertEqual(math.pow(INF, 0.), 1.)
        self.assertEqual(math.pow(INF, -0.), 1.)
        self.assertEqual(math.pow(INF, -2.), 0.)
        self.assertEqual(math.pow(INF, -2.3), 0.)
        self.assertEqual(math.pow(INF, -3.), 0.)
        self.assertEqual(math.pow(INF, NINF), 0.)
        self.assertTrue(math.isnan(math.pow(INF, NAN)))
        # pow(-0., x)
        self.assertEqual(math.pow(-0., INF), 0.)
        # A negative zero base keeps its sign only for odd integral
        # exponents.
        self.assertEqual(math.pow(-0., 3.), -0.)
        self.assertEqual(math.pow(-0., 2.3), 0.)
        self.assertEqual(math.pow(-0., 2.), 0.)
        self.assertEqual(math.pow(-0., 0.), 1.)
        self.assertEqual(math.pow(-0., -0.), 1.)
        self.assertRaises(ValueError, math.pow, -0., -2.)
        self.assertRaises(ValueError, math.pow, -0., -2.3)
        self.assertRaises(ValueError, math.pow, -0., -3.)
        self.assertEqual(math.pow(-0., NINF), INF)
        self.assertTrue(math.isnan(math.pow(-0., NAN)))
        # pow(NINF, x)
        self.assertEqual(math.pow(NINF, INF), INF)
        # Likewise, -inf yields a negative result only for odd integral
        # exponents.
        self.assertEqual(math.pow(NINF, 3.), NINF)
        self.assertEqual(math.pow(NINF, 2.3), INF)
        self.assertEqual(math.pow(NINF, 2.), INF)
        self.assertEqual(math.pow(NINF, 0.), 1.)
        self.assertEqual(math.pow(NINF, -0.), 1.)
        self.assertEqual(math.pow(NINF, -2.), 0.)
        self.assertEqual(math.pow(NINF, -2.3), 0.)
        self.assertEqual(math.pow(NINF, -3.), -0.)
        self.assertEqual(math.pow(NINF, NINF), 0.)
        self.assertTrue(math.isnan(math.pow(NINF, NAN)))
        # pow(-1, x)
        self.assertEqual(math.pow(-1., INF), 1.)
        self.assertEqual(math.pow(-1., 3.), -1.)
        self.assertRaises(ValueError, math.pow, -1., 2.3)
        self.assertEqual(math.pow(-1., 2.), 1.)
        self.assertEqual(math.pow(-1., 0.), 1.)
        self.assertEqual(math.pow(-1., -0.), 1.)
        self.assertEqual(math.pow(-1., -2.), 1.)
        self.assertRaises(ValueError, math.pow, -1., -2.3)
        self.assertEqual(math.pow(-1., -3.), -1.)
        self.assertEqual(math.pow(-1., NINF), 1.)
        self.assertTrue(math.isnan(math.pow(-1., NAN)))
        # pow(1, x)
        self.assertEqual(math.pow(1., INF), 1.)
        self.assertEqual(math.pow(1., 3.), 1.)
        self.assertEqual(math.pow(1., 2.3), 1.)
        self.assertEqual(math.pow(1., 2.), 1.)
        self.assertEqual(math.pow(1., 0.), 1.)
        self.assertEqual(math.pow(1., -0.), 1.)
        self.assertEqual(math.pow(1., -2.), 1.)
        self.assertEqual(math.pow(1., -2.3), 1.)
        self.assertEqual(math.pow(1., -3.), 1.)
        self.assertEqual(math.pow(1., NINF), 1.)
        self.assertEqual(math.pow(1., NAN), 1.)
        # pow(x, 0) should be 1 for any x
        self.assertEqual(math.pow(2.3, 0.), 1.)
        self.assertEqual(math.pow(-2.3, 0.), 1.)
        self.assertEqual(math.pow(NAN, 0.), 1.)
        self.assertEqual(math.pow(2.3, -0.), 1.)
        self.assertEqual(math.pow(-2.3, -0.), 1.)
        self.assertEqual(math.pow(NAN, -0.), 1.)
        # pow(x, y) is invalid if x is negative and y is not integral
        self.assertRaises(ValueError, math.pow, -1., 2.3)
        self.assertRaises(ValueError, math.pow, -15., -3.1)
        # pow(x, NINF)
        # |x| > 1 decays to 0; |x| < 1 blows up to +inf (sign ignored).
        self.assertEqual(math.pow(1.9, NINF), 0.)
        self.assertEqual(math.pow(1.1, NINF), 0.)
        self.assertEqual(math.pow(0.9, NINF), INF)
        self.assertEqual(math.pow(0.1, NINF), INF)
        self.assertEqual(math.pow(-0.1, NINF), INF)
        self.assertEqual(math.pow(-0.9, NINF), INF)
        self.assertEqual(math.pow(-1.1, NINF), 0.)
        self.assertEqual(math.pow(-1.9, NINF), 0.)
        # pow(x, INF)
        # The mirror image: |x| > 1 gives +inf; |x| < 1 gives 0.
        self.assertEqual(math.pow(1.9, INF), INF)
        self.assertEqual(math.pow(1.1, INF), INF)
        self.assertEqual(math.pow(0.9, INF), 0.)
        self.assertEqual(math.pow(0.1, INF), 0.)
        self.assertEqual(math.pow(-0.1, INF), 0.)
        self.assertEqual(math.pow(-0.9, INF), 0.)
        self.assertEqual(math.pow(-1.1, INF), INF)
        self.assertEqual(math.pow(-1.9, INF), INF)
        # pow(x, y) should work for x negative, y an integer
        self.ftest('(-2.)**3.', math.pow(-2.0, 3.0), -8.0)
        self.ftest('(-2.)**2.', math.pow(-2.0, 2.0), 4.0)
        self.ftest('(-2.)**1.', math.pow(-2.0, 1.0), -2.0)
        self.ftest('(-2.)**0.', math.pow(-2.0, 0.0), 1.0)
        self.ftest('(-2.)**-0.', math.pow(-2.0, -0.0), 1.0)
        self.ftest('(-2.)**-1.', math.pow(-2.0, -1.0), -0.5)
        self.ftest('(-2.)**-2.', math.pow(-2.0, -2.0), 0.25)
        self.ftest('(-2.)**-3.', math.pow(-2.0, -3.0), -0.125)
        self.assertRaises(ValueError, math.pow, -2.0, -0.5)
        self.assertRaises(ValueError, math.pow, -2.0, 0.5)
        # the following tests have been commented out since they don't
        # really belong here: the implementation of ** for floats is
        # independent of the implementation of math.pow
        #self.assertEqual(1**NAN, 1)
        #self.assertEqual(1**INF, 1)
        #self.assertEqual(1**NINF, 1)
        #self.assertEqual(1**0, 1)
        #self.assertEqual(1.**NAN, 1)
        #self.assertEqual(1.**INF, 1)
        #self.assertEqual(1.**NINF, 1)
        #self.assertEqual(1.**0, 1)
def testRadians(self):
self.assertRaises(TypeError, math.radians)
self.ftest('radians(180)', math.radians(180), math.pi)
self.ftest('radians(90)', math.radians(90), math.pi/2)
self.ftest('radians(-45)', math.radians(-45), -math.pi/4)
self.ftest('radians(0)', math.radians(0), 0)
    @requires_IEEE_754
    def testRemainder(self):
        """Check math.remainder() against the IEEE 754 remainder operation:
        spec compliance via exact Fraction arithmetic, hand-picked hex
        triples, tiny subnormal moduli, and special-value handling."""
        from fractions import Fraction
        def validate_spec(x, y, r):
            """
            Check that r matches remainder(x, y) according to the IEEE 754
            specification. Assumes that x, y and r are finite and y is nonzero.
            """
            # Fractions give exact rational arithmetic, so these checks are
            # free of floating-point rounding themselves.
            fx, fy, fr = Fraction(x), Fraction(y), Fraction(r)
            # r should not exceed y/2 in absolute value
            self.assertLessEqual(abs(fr), abs(fy/2))
            # x - r should be an exact integer multiple of y
            n = (fx - fr) / fy
            self.assertEqual(n, int(n))
            if abs(fr) == abs(fy/2):
                # If |r| == |y/2|, n should be even.
                self.assertEqual(n/2, int(n/2))
        # triples (x, y, remainder(x, y)) in hexadecimal form.
        testcases = [
            # Remainders modulo 1, showing the ties-to-even behaviour.
            '-4.0 1 -0.0',
            '-3.8 1  0.8',
            '-3.0 1 -0.0',
            '-2.8 1 -0.8',
            '-2.0 1 -0.0',
            '-1.8 1  0.8',
            '-1.0 1 -0.0',
            '-0.8 1 -0.8',
            '-0.0 1 -0.0',
            ' 0.0 1  0.0',
            ' 0.8 1  0.8',
            ' 1.0 1  0.0',
            ' 1.8 1 -0.8',
            ' 2.0 1  0.0',
            ' 2.8 1  0.8',
            ' 3.0 1  0.0',
            ' 3.8 1 -0.8',
            ' 4.0 1  0.0',

            # Reductions modulo 2*pi
            '0x0.0p+0 0x1.921fb54442d18p+2 0x0.0p+0',
            '0x1.921fb54442d18p+0 0x1.921fb54442d18p+2  0x1.921fb54442d18p+0',
            '0x1.921fb54442d17p+1 0x1.921fb54442d18p+2  0x1.921fb54442d17p+1',
            '0x1.921fb54442d18p+1 0x1.921fb54442d18p+2  0x1.921fb54442d18p+1',
            '0x1.921fb54442d19p+1 0x1.921fb54442d18p+2 -0x1.921fb54442d17p+1',
            '0x1.921fb54442d17p+2 0x1.921fb54442d18p+2 -0x0.0000000000001p+2',
            '0x1.921fb54442d18p+2 0x1.921fb54442d18p+2  0x0p0',
            '0x1.921fb54442d19p+2 0x1.921fb54442d18p+2  0x0.0000000000001p+2',
            '0x1.2d97c7f3321d1p+3 0x1.921fb54442d18p+2  0x1.921fb54442d14p+1',
            '0x1.2d97c7f3321d2p+3 0x1.921fb54442d18p+2 -0x1.921fb54442d18p+1',
            '0x1.2d97c7f3321d3p+3 0x1.921fb54442d18p+2 -0x1.921fb54442d14p+1',
            '0x1.921fb54442d17p+3 0x1.921fb54442d18p+2 -0x0.0000000000001p+3',
            '0x1.921fb54442d18p+3 0x1.921fb54442d18p+2  0x0p0',
            '0x1.921fb54442d19p+3 0x1.921fb54442d18p+2  0x0.0000000000001p+3',
            '0x1.f6a7a2955385dp+3 0x1.921fb54442d18p+2  0x1.921fb54442d14p+1',
            '0x1.f6a7a2955385ep+3 0x1.921fb54442d18p+2  0x1.921fb54442d18p+1',
            '0x1.f6a7a2955385fp+3 0x1.921fb54442d18p+2 -0x1.921fb54442d14p+1',
            '0x1.1475cc9eedf00p+5 0x1.921fb54442d18p+2  0x1.921fb54442d10p+1',
            '0x1.1475cc9eedf01p+5 0x1.921fb54442d18p+2 -0x1.921fb54442d10p+1',

            # Symmetry with respect to signs.
            ' 1  0.c  0.4',
            '-1  0.c -0.4',
            ' 1 -0.c  0.4',
            '-1 -0.c -0.4',
            ' 1.4  0.c -0.4',
            '-1.4  0.c  0.4',
            ' 1.4 -0.c -0.4',
            '-1.4 -0.c  0.4',

            # Huge modulus, to check that the underlying algorithm doesn't
            # rely on 2.0 * modulus being representable.
            '0x1.dp+1023 0x1.4p+1023  0x0.9p+1023',
            '0x1.ep+1023 0x1.4p+1023 -0x0.ap+1023',
            '0x1.fp+1023 0x1.4p+1023 -0x0.9p+1023',
        ]

        for case in testcases:
            with self.subTest(case=case):
                x_hex, y_hex, expected_hex = case.split()
                x = float.fromhex(x_hex)
                y = float.fromhex(y_hex)
                expected = float.fromhex(expected_hex)
                validate_spec(x, y, expected)
                actual = math.remainder(x, y)
                # Cheap way of checking that the floats are
                # as identical as we need them to be.
                self.assertEqual(actual.hex(), expected.hex())

        # Test tiny subnormal modulus: there's potential for
        # getting the implementation wrong here (for example,
        # by assuming that modulus/2 is exactly representable).
        tiny = float.fromhex('1p-1074')  # min +ve subnormal
        for n in range(-25, 25):
            if n == 0:
                continue
            y = n * tiny
            for m in range(100):
                x = m * tiny
                actual = math.remainder(x, y)
                validate_spec(x, y, actual)
                actual = math.remainder(-x, y)
                validate_spec(-x, y, actual)

        # Special values.
        # NaNs should propagate as usual.
        for value in [NAN, 0.0, -0.0, 2.0, -2.3, NINF, INF]:
            self.assertIsNaN(math.remainder(NAN, value))
            self.assertIsNaN(math.remainder(value, NAN))

        # remainder(x, inf) is x, for non-nan non-infinite x.
        for value in [-2.3, -0.0, 0.0, 2.3]:
            self.assertEqual(math.remainder(value, INF), value)
            self.assertEqual(math.remainder(value, NINF), value)

        # remainder(x, 0) and remainder(infinity, x) for non-NaN x are invalid
        # operations according to IEEE 754-2008 7.2(f), and should raise.
        for value in [NINF, -2.3, -0.0, 0.0, 2.3, INF]:
            with self.assertRaises(ValueError):
                math.remainder(INF, value)
            with self.assertRaises(ValueError):
                math.remainder(NINF, value)
            with self.assertRaises(ValueError):
                math.remainder(value, 0.0)
            with self.assertRaises(ValueError):
                math.remainder(value, -0.0)
    def testSin(self):
        """Check math.sin(): origin, +/-pi/2, infinities and NaN handling."""
        self.assertRaises(TypeError, math.sin)
        self.ftest('sin(0)', math.sin(0), 0)
        self.ftest('sin(pi/2)', math.sin(math.pi/2), 1)
        self.ftest('sin(-pi/2)', math.sin(-math.pi/2), -1)
        # Platforms disagree on sin(inf): some libms return NaN, others
        # signal a domain error; accept either behaviour.
        try:
            self.assertTrue(math.isnan(math.sin(INF)))
            self.assertTrue(math.isnan(math.sin(NINF)))
        except ValueError:
            self.assertRaises(ValueError, math.sin, INF)
            self.assertRaises(ValueError, math.sin, NINF)
        self.assertTrue(math.isnan(math.sin(NAN)))
def testSinh(self):
self.assertRaises(TypeError, math.sinh)
self.ftest('sinh(0)', math.sinh(0), 0)
self.ftest('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1)
self.ftest('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0)
self.assertEqual(math.sinh(INF), INF)
self.assertEqual(math.sinh(NINF), NINF)
self.assertTrue(math.isnan(math.sinh(NAN)))
def testSqrt(self):
self.assertRaises(TypeError, math.sqrt)
self.ftest('sqrt(0)', math.sqrt(0), 0)
self.ftest('sqrt(0)', math.sqrt(0.0), 0.0)
self.ftest('sqrt(2.5)', math.sqrt(2.5), 1.5811388300841898)
self.ftest('sqrt(0.25)', math.sqrt(0.25), 0.5)
self.ftest('sqrt(25.25)', math.sqrt(25.25), 5.024937810560445)
self.ftest('sqrt(1)', math.sqrt(1), 1)
self.ftest('sqrt(4)', math.sqrt(4), 2)
self.assertEqual(math.sqrt(INF), INF)
self.assertRaises(ValueError, math.sqrt, -1)
self.assertRaises(ValueError, math.sqrt, NINF)
self.assertTrue(math.isnan(math.sqrt(NAN)))
def testTan(self):
self.assertRaises(TypeError, math.tan)
self.ftest('tan(0)', math.tan(0), 0)
self.ftest('tan(pi/4)', math.tan(math.pi/4), 1)
self.ftest('tan(-pi/4)', math.tan(-math.pi/4), -1)
try:
self.assertTrue(math.isnan(math.tan(INF)))
self.assertTrue(math.isnan(math.tan(NINF)))
except:
self.assertRaises(ValueError, math.tan, INF)
self.assertRaises(ValueError, math.tan, NINF)
self.assertTrue(math.isnan(math.tan(NAN)))
def testTanh(self):
self.assertRaises(TypeError, math.tanh)
self.ftest('tanh(0)', math.tanh(0), 0)
self.ftest('tanh(1)+tanh(-1)', math.tanh(1)+math.tanh(-1), 0,
abs_tol=math.ulp(1))
self.ftest('tanh(inf)', math.tanh(INF), 1)
self.ftest('tanh(-inf)', math.tanh(NINF), -1)
self.assertTrue(math.isnan(math.tanh(NAN)))
@requires_IEEE_754
def testTanhSign(self):
# check that tanh(-0.) == -0. on IEEE 754 systems
self.assertEqual(math.tanh(-0.), -0.)
self.assertEqual(math.copysign(1., math.tanh(-0.)),
math.copysign(1., -0.))
    def test_trunc(self):
        """Check math.trunc(): truncation toward zero, int return type, and
        delegation to __trunc__ (but not __float__/__index__)."""
        self.assertEqual(math.trunc(1), 1)
        self.assertEqual(math.trunc(-1), -1)
        # trunc always returns an int, even for float input.
        self.assertEqual(type(math.trunc(1)), int)
        self.assertEqual(type(math.trunc(1.5)), int)
        self.assertEqual(math.trunc(1.5), 1)
        self.assertEqual(math.trunc(-1.5), -1)
        self.assertEqual(math.trunc(1.999999), 1)
        self.assertEqual(math.trunc(-1.999999), -1)
        self.assertEqual(math.trunc(-0.999999), -0)
        self.assertEqual(math.trunc(-100.999), -100)

        # A plain object with __trunc__: trunc() must use it.
        class TestTrunc:
            def __trunc__(self):
                return 23
        # A float subclass with __trunc__: the override must win over the
        # float value.
        class FloatTrunc(float):
            def __trunc__(self):
                return 23
        # No __trunc__ at all: trunc() must raise TypeError.
        class TestNoTrunc:
            pass

        self.assertEqual(math.trunc(TestTrunc()), 23)
        self.assertEqual(math.trunc(FloatTrunc()), 23)

        self.assertRaises(TypeError, math.trunc)
        self.assertRaises(TypeError, math.trunc, 1, 2)
        # __float__ alone is not enough -- trunc() requires __trunc__.
        self.assertRaises(TypeError, math.trunc, FloatLike(23.5))
        self.assertRaises(TypeError, math.trunc, TestNoTrunc())
def testIsfinite(self):
self.assertTrue(math.isfinite(0.0))
self.assertTrue(math.isfinite(-0.0))
self.assertTrue(math.isfinite(1.0))
self.assertTrue(math.isfinite(-1.0))
self.assertFalse(math.isfinite(float("nan")))
self.assertFalse(math.isfinite(float("inf")))
self.assertFalse(math.isfinite(float("-inf")))
def testIsnan(self):
self.assertTrue(math.isnan(float("nan")))
self.assertTrue(math.isnan(float("-nan")))
self.assertTrue(math.isnan(float("inf") * 0.))
self.assertFalse(math.isnan(float("inf")))
self.assertFalse(math.isnan(0.))
self.assertFalse(math.isnan(1.))
def testIsinf(self):
self.assertTrue(math.isinf(float("inf")))
self.assertTrue(math.isinf(float("-inf")))
self.assertTrue(math.isinf(1E400))
self.assertTrue(math.isinf(-1E400))
self.assertFalse(math.isinf(float("nan")))
self.assertFalse(math.isinf(0.))
self.assertFalse(math.isinf(1.))
    @requires_IEEE_754
    def test_nan_constant(self):
        """The math.nan constant must actually be a NaN on IEEE 754 systems."""
        self.assertTrue(math.isnan(math.nan))
    @requires_IEEE_754
    def test_inf_constant(self):
        """math.inf must be positive infinity, equal to float('inf')."""
        self.assertTrue(math.isinf(math.inf))
        self.assertGreater(math.inf, 0.0)
        # Both signs round-trip through the float() constructor.
        self.assertEqual(math.inf, float("inf"))
        self.assertEqual(-math.inf, float("-inf"))
    # RED_FLAG 16-Oct-2000 Tim
    # While 2.0 is more consistent about exceptions than previous releases, it
    # still fails this part of the test on some platforms.  For now, we only
    # *run* test_exceptions() in verbose mode, so that this isn't normally
    # tested.
    @unittest.skipUnless(verbose, 'requires verbose mode')
    def test_exceptions(self):
        """Exercise libm error reporting: underflow must be silent (return
        0.0), overflow must raise OverflowError, domain errors ValueError."""
        try:
            x = math.exp(-1000000000)
        except:
            # mathmodule.c is failing to weed out underflows from libm, or
            # we've got an fp format with huge dynamic range
            self.fail("underflowing exp() should not have raised "
                      "an exception")
        if x != 0:
            self.fail("underflowing exp() should have returned 0")

        # If this fails, probably using a strict IEEE-754 conforming libm, and x
        # is +Inf afterwards.  But Python wants overflows detected by default.
        try:
            x = math.exp(1000000000)
        except OverflowError:
            pass
        else:
            self.fail("overflowing exp() didn't trigger OverflowError")

        # If this fails, it could be a puzzle.  One odd possibility is that
        # mathmodule.c's macros are getting confused while comparing
        # Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
        # as a result (and so raising OverflowError instead).
        try:
            x = math.sqrt(-1.0)
        except ValueError:
            pass
        else:
            self.fail("sqrt(-1) didn't raise ValueError")
    @requires_IEEE_754
    def test_testfile(self):
        """Run the real-valued cases from the shared cmath/math test file,
        checking results (or expected exceptions) within default tolerances."""
        # Some tests need to be skipped on ancient OS X versions.
        # See issue #27953.
        SKIP_ON_TIGER = {'tan0064'}

        osx_version = None
        if sys.platform == 'darwin':
            version_txt = platform.mac_ver()[0]
            try:
                osx_version = tuple(map(int, version_txt.split('.')))
            except ValueError:
                pass

        fail_fmt = "{}: {}({!r}): {}"

        failures = []
        for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
            # Skip if either the input or result is complex
            if ai != 0.0 or ei != 0.0:
                continue
            if fn in ['rect', 'polar']:
                # no real versions of rect, polar
                continue
            # Skip certain tests on OS X 10.4.
            if osx_version is not None and osx_version < (10, 5):
                if id in SKIP_ON_TIGER:
                    continue

            func = getattr(math, fn)

            # Flagged cases are expected to raise rather than return a value.
            if 'invalid' in flags or 'divide-by-zero' in flags:
                er = 'ValueError'
            elif 'overflow' in flags:
                er = 'OverflowError'

            try:
                result = func(ar)
            except ValueError:
                result = 'ValueError'
            except OverflowError:
                result = 'OverflowError'

            # Default tolerances
            ulp_tol, abs_tol = 5, 0.0

            failure = result_check(er, result, ulp_tol, abs_tol)
            if failure is None:
                continue

            msg = fail_fmt.format(id, fn, ar, failure)
            failures.append(msg)

        if failures:
            self.fail('Failures in test_testfile:\n  ' +
                      '\n  '.join(failures))
    @requires_IEEE_754
    def test_mtestfile(self):
        """Run the math-module-specific test cases, with per-function
        tolerance adjustments for gamma, lgamma and erfc."""
        fail_fmt = "{}: {}({!r}): {}"

        failures = []
        for id, fn, arg, expected, flags in parse_mtestfile(math_testcases):
            func = getattr(math, fn)

            # Flagged cases are expected to raise rather than return a value.
            if 'invalid' in flags or 'divide-by-zero' in flags:
                expected = 'ValueError'
            elif 'overflow' in flags:
                expected = 'OverflowError'

            try:
                got = func(arg)
            except ValueError:
                got = 'ValueError'
            except OverflowError:
                got = 'OverflowError'

            # Default tolerances
            ulp_tol, abs_tol = 5, 0.0

            # Exceptions to the defaults
            if fn == 'gamma':
                # Experimental results on one platform gave
                # an accuracy of <= 10 ulps across the entire float
                # domain. We weaken that to require 20 ulp accuracy.
                ulp_tol = 20

            elif fn == 'lgamma':
                # we use a weaker accuracy test for lgamma;
                # lgamma only achieves an absolute error of
                # a few multiples of the machine accuracy, in
                # general.
                abs_tol = 1e-15

            elif fn == 'erfc' and arg >= 0.0:
                # erfc has less-than-ideal accuracy for large
                # arguments (x ~ 25 or so), mainly due to the
                # error involved in computing exp(-x*x).
                #
                # Observed between CPython and mpmath at 25 dp:
                #       x <  0 : err <= 2 ulp
                #  0 <= x <  1 : err <= 10 ulp
                #  1 <= x < 10 : err <= 100 ulp
                # 10 <= x < 20 : err <= 300 ulp
                # 20 <= x      : < 600 ulp
                #
                if arg < 1.0:
                    ulp_tol = 10
                elif arg < 10.0:
                    ulp_tol = 100
                else:
                    ulp_tol = 1000

            failure = result_check(expected, got, ulp_tol, abs_tol)
            if failure is None:
                continue

            msg = fail_fmt.format(id, fn, arg, failure)
            failures.append(msg)

        if failures:
            self.fail('Failures in test_mtestfile:\n  ' +
                      '\n  '.join(failures))
    def test_prod(self):
        """Check math.prod(): empty input and start value, int/float overflow
        fast paths, type errors, zeros, big products, NaN/inf propagation,
        and the type of the result."""
        prod = math.prod
        # Empty iterable: the start value (default 1) is returned unchanged.
        self.assertEqual(prod([]), 1)
        self.assertEqual(prod([], start=5), 5)
        self.assertEqual(prod(list(range(2,8))), 5040)
        self.assertEqual(prod(iter(list(range(2,8)))), 5040)
        self.assertEqual(prod(range(1, 10), start=10), 3628800)

        self.assertEqual(prod([1, 2, 3, 4, 5]), 120)
        self.assertEqual(prod([1.0, 2.0, 3.0, 4.0, 5.0]), 120.0)
        self.assertEqual(prod([1, 2, 3, 4.0, 5.0]), 120.0)
        self.assertEqual(prod([1.0, 2.0, 3.0, 4, 5]), 120.0)

        # Test overflow in fast-path for integers
        self.assertEqual(prod([1, 1, 2**32, 1, 1]), 2**32)
        # Test overflow in fast-path for floats
        self.assertEqual(prod([1.0, 1.0, 2**32, 1, 1]), float(2**32))

        self.assertRaises(TypeError, prod)
        self.assertRaises(TypeError, prod, 42)
        self.assertRaises(TypeError, prod, ['a', 'b', 'c'])
        self.assertRaises(TypeError, prod, ['a', 'b', 'c'], '')
        self.assertRaises(TypeError, prod, [b'a', b'c'], b'')
        values = [bytearray(b'a'), bytearray(b'b')]
        self.assertRaises(TypeError, prod, values, bytearray(b''))
        self.assertRaises(TypeError, prod, [[1], [2], [3]])
        self.assertRaises(TypeError, prod, [{2:3}])
        self.assertRaises(TypeError, prod, [{2:3}]*2, {2:3})
        self.assertRaises(TypeError, prod, [[1], [2], [3]], [])
        with self.assertRaises(TypeError):
            prod([10, 20], [30, 40])     # start is a keyword-only argument

        self.assertEqual(prod([0, 1, 2, 3]), 0)
        self.assertEqual(prod([1, 0, 2, 3]), 0)
        self.assertEqual(prod([1, 2, 3, 0]), 0)

        def _naive_prod(iterable, start=1):
            # Reference implementation used to cross-check big products.
            for elem in iterable:
                start *= elem
            return start

        # Big integers
        iterable = range(1, 10000)
        self.assertEqual(prod(iterable), _naive_prod(iterable))
        iterable = range(-10000, -1)
        self.assertEqual(prod(iterable), _naive_prod(iterable))
        iterable = range(-1000, 1000)
        self.assertEqual(prod(iterable), 0)

        # Big floats
        iterable = [float(x) for x in range(1, 1000)]
        self.assertEqual(prod(iterable), _naive_prod(iterable))
        iterable = [float(x) for x in range(-1000, -1)]
        self.assertEqual(prod(iterable), _naive_prod(iterable))
        # Overflows to an infinity before the 0.0 is reached; inf * 0.0 is NaN.
        iterable = [float(x) for x in range(-1000, 1000)]
        self.assertIsNaN(prod(iterable))

        # Float tests
        self.assertIsNaN(prod([1, 2, 3, float("nan"), 2, 3]))
        self.assertIsNaN(prod([1, 0, float("nan"), 2, 3]))
        self.assertIsNaN(prod([1, float("nan"), 0, 3]))
        self.assertIsNaN(prod([1, float("inf"), float("nan"),3]))
        self.assertIsNaN(prod([1, float("-inf"), float("nan"),3]))
        self.assertIsNaN(prod([1, float("nan"), float("inf"),3]))
        self.assertIsNaN(prod([1, float("nan"), float("-inf"),3]))

        self.assertEqual(prod([1, 2, 3, float('inf'),-3,4]), float('-inf'))
        self.assertEqual(prod([1, 2, 3, float('-inf'),-3,4]), float('inf'))

        self.assertIsNaN(prod([1,2,0,float('inf'), -3, 4]))
        self.assertIsNaN(prod([1,2,0,float('-inf'), -3, 4]))
        self.assertIsNaN(prod([1, 2, 3, float('inf'), -3, 0, 3]))
        self.assertIsNaN(prod([1, 2, 3, float('-inf'), -3, 0, 2]))

        # Type preservation
        self.assertEqual(type(prod([1, 2, 3, 4, 5, 6])), int)
        self.assertEqual(type(prod([1, 2.0, 3, 4, 5, 6])), float)
        self.assertEqual(type(prod(range(1, 10000))), int)
        self.assertEqual(type(prod(range(1, 10000), start=1.0)), float)
        self.assertEqual(type(prod([1, decimal.Decimal(2.0), 3, 4, 5, 6])),
                         decimal.Decimal)
    def testPerm(self):
        """Check math.perm() against the factorial definition, Pascal-style
        identity, corner cases, error conditions and result types."""
        perm = math.perm
        factorial = math.factorial
        # Test if factorial definition is satisfied
        for n in range(100):
            for k in range(n + 1):
                self.assertEqual(perm(n, k),
                                 factorial(n) // factorial(n - k))

        # Test for Pascal's identity
        for n in range(1, 100):
            for k in range(1, n):
                self.assertEqual(perm(n, k), perm(n - 1, k - 1) * k + perm(n - 1, k))

        # Test corner cases
        for n in range(1, 100):
            self.assertEqual(perm(n, 0), 1)
            self.assertEqual(perm(n, 1), n)
            self.assertEqual(perm(n, n), factorial(n))

        # Test one argument form
        for n in range(20):
            self.assertEqual(perm(n), factorial(n))
            self.assertEqual(perm(n, None), factorial(n))

        # Raises TypeError if any argument is non-integer or argument count is
        # not 1 or 2
        self.assertRaises(TypeError, perm, 10, 1.0)
        self.assertRaises(TypeError, perm, 10, decimal.Decimal(1.0))
        self.assertRaises(TypeError, perm, 10, "1")
        self.assertRaises(TypeError, perm, 10.0, 1)
        self.assertRaises(TypeError, perm, decimal.Decimal(10.0), 1)
        self.assertRaises(TypeError, perm, "10", 1)

        self.assertRaises(TypeError, perm)
        self.assertRaises(TypeError, perm, 10, 1, 3)
        self.assertRaises(TypeError, perm)

        # Raises Value error if not k or n are negative numbers
        self.assertRaises(ValueError, perm, -1, 1)
        self.assertRaises(ValueError, perm, -2**1000, 1)
        self.assertRaises(ValueError, perm, 1, -1)
        self.assertRaises(ValueError, perm, 1, -2**1000)

        # Returns zero if k is greater than n
        self.assertEqual(perm(1, 2), 0)
        self.assertEqual(perm(1, 2**1000), 0)

        # Huge n: small k still works; perm(n, n) would need more memory
        # than CPython can allocate.
        n = 2**1000
        self.assertEqual(perm(n, 0), 1)
        self.assertEqual(perm(n, 1), n)
        self.assertEqual(perm(n, 2), n * (n-1))
        if support.check_impl_detail(cpython=True):
            self.assertRaises(OverflowError, perm, n, n)

        # bools are accepted as ints; the result type is exactly int.
        for n, k in (True, True), (True, False), (False, False):
            self.assertEqual(perm(n, k), 1)
            self.assertIs(type(perm(n, k)), int)

        # int subclasses and __index__-providing objects are accepted.
        self.assertEqual(perm(IntSubclass(5), IntSubclass(2)), 20)
        self.assertEqual(perm(MyIndexable(5), MyIndexable(2)), 20)
        for k in range(3):
            self.assertIs(type(perm(IntSubclass(5), IntSubclass(k))), int)
            self.assertIs(type(perm(MyIndexable(5), MyIndexable(k))), int)
    def testComb(self):
        """Check math.comb() against the factorial definition, Pascal's
        identity, symmetry, corner cases, error conditions and result types."""
        comb = math.comb
        factorial = math.factorial
        # Test if factorial definition is satisfied
        for n in range(100):
            for k in range(n + 1):
                self.assertEqual(comb(n, k), factorial(n)
                    // (factorial(k) * factorial(n - k)))

        # Test for Pascal's identity
        for n in range(1, 100):
            for k in range(1, n):
                self.assertEqual(comb(n, k), comb(n - 1, k - 1) + comb(n - 1, k))

        # Test corner cases
        for n in range(100):
            self.assertEqual(comb(n, 0), 1)
            self.assertEqual(comb(n, n), 1)

        for n in range(1, 100):
            self.assertEqual(comb(n, 1), n)
            self.assertEqual(comb(n, n - 1), n)

        # Test Symmetry
        for n in range(100):
            for k in range(n // 2):
                self.assertEqual(comb(n, k), comb(n, n - k))

        # Raises TypeError if any argument is non-integer or argument count is
        # not 2
        self.assertRaises(TypeError, comb, 10, 1.0)
        self.assertRaises(TypeError, comb, 10, decimal.Decimal(1.0))
        self.assertRaises(TypeError, comb, 10, "1")
        self.assertRaises(TypeError, comb, 10.0, 1)
        self.assertRaises(TypeError, comb, decimal.Decimal(10.0), 1)
        self.assertRaises(TypeError, comb, "10", 1)

        self.assertRaises(TypeError, comb, 10)
        self.assertRaises(TypeError, comb, 10, 1, 3)
        self.assertRaises(TypeError, comb)

        # Raises Value error if not k or n are negative numbers
        self.assertRaises(ValueError, comb, -1, 1)
        self.assertRaises(ValueError, comb, -2**1000, 1)
        self.assertRaises(ValueError, comb, 1, -1)
        self.assertRaises(ValueError, comb, 1, -2**1000)

        # Returns zero if k is greater than n
        self.assertEqual(comb(1, 2), 0)
        self.assertEqual(comb(1, 2**1000), 0)

        # Huge n: small/symmetric k still works; comb(n, n//2) would need
        # more memory than CPython can allocate.
        n = 2**1000
        self.assertEqual(comb(n, 0), 1)
        self.assertEqual(comb(n, 1), n)
        self.assertEqual(comb(n, 2), n * (n-1) // 2)
        self.assertEqual(comb(n, n), 1)
        self.assertEqual(comb(n, n-1), n)
        self.assertEqual(comb(n, n-2), n * (n-1) // 2)
        if support.check_impl_detail(cpython=True):
            self.assertRaises(OverflowError, comb, n, n//2)

        # bools are accepted as ints; the result type is exactly int.
        for n, k in (True, True), (True, False), (False, False):
            self.assertEqual(comb(n, k), 1)
            self.assertIs(type(comb(n, k)), int)

        # int subclasses and __index__-providing objects are accepted.
        self.assertEqual(comb(IntSubclass(5), IntSubclass(2)), 10)
        self.assertEqual(comb(MyIndexable(5), MyIndexable(2)), 10)
        for k in range(3):
            self.assertIs(type(comb(IntSubclass(5), IntSubclass(k))), int)
            self.assertIs(type(comb(MyIndexable(5), MyIndexable(k))), int)
    @requires_IEEE_754
    def test_nextafter(self):
        """Check math.nextafter() at representation boundaries: power-of-two
        steps, around 1.0 and 0.0, at the infinities, and with NaNs."""
        # around 2^52 and 2^63
        self.assertEqual(math.nextafter(4503599627370496.0, -INF),
                         4503599627370495.5)
        self.assertEqual(math.nextafter(4503599627370496.0, INF),
                         4503599627370497.0)
        self.assertEqual(math.nextafter(9223372036854775808.0, 0.0),
                         9223372036854774784.0)
        self.assertEqual(math.nextafter(-9223372036854775808.0, 0.0),
                         -9223372036854774784.0)

        # around 1.0
        self.assertEqual(math.nextafter(1.0, -INF),
                         float.fromhex('0x1.fffffffffffffp-1'))
        self.assertEqual(math.nextafter(1.0, INF),
                         float.fromhex('0x1.0000000000001p+0'))

        # x == y: y is returned
        self.assertEqual(math.nextafter(2.0, 2.0), 2.0)
        # ... including the sign of a signed zero.
        self.assertEqualSign(math.nextafter(-0.0, +0.0), +0.0)
        self.assertEqualSign(math.nextafter(+0.0, -0.0), -0.0)

        # around 0.0
        smallest_subnormal = sys.float_info.min * sys.float_info.epsilon
        self.assertEqual(math.nextafter(+0.0, INF), smallest_subnormal)
        self.assertEqual(math.nextafter(-0.0, INF), smallest_subnormal)
        self.assertEqual(math.nextafter(+0.0, -INF), -smallest_subnormal)
        self.assertEqual(math.nextafter(-0.0, -INF), -smallest_subnormal)
        # Stepping a subnormal toward zero lands on a zero of matching sign.
        self.assertEqualSign(math.nextafter(smallest_subnormal, +0.0), +0.0)
        self.assertEqualSign(math.nextafter(-smallest_subnormal, +0.0), -0.0)
        self.assertEqualSign(math.nextafter(smallest_subnormal, -0.0), +0.0)
        self.assertEqualSign(math.nextafter(-smallest_subnormal, -0.0), -0.0)

        # around infinity
        largest_normal = sys.float_info.max
        self.assertEqual(math.nextafter(INF, 0.0), largest_normal)
        self.assertEqual(math.nextafter(-INF, 0.0), -largest_normal)
        self.assertEqual(math.nextafter(largest_normal, INF), INF)
        self.assertEqual(math.nextafter(-largest_normal, -INF), -INF)

        # NaN
        self.assertIsNaN(math.nextafter(NAN, 1.0))
        self.assertIsNaN(math.nextafter(1.0, NAN))
        self.assertIsNaN(math.nextafter(NAN, NAN))
    @requires_IEEE_754
    def test_ulp(self):
        """Check math.ulp(): epsilon at 1.0, power-of-two scaling, min/max
        floats, special values, and symmetry in the sign of the argument."""
        self.assertEqual(math.ulp(1.0), sys.float_info.epsilon)
        # use int ** int rather than float ** int to not rely on pow() accuracy
        self.assertEqual(math.ulp(2 ** 52), 1.0)
        self.assertEqual(math.ulp(2 ** 53), 2.0)
        self.assertEqual(math.ulp(2 ** 64), 4096.0)

        # min and max
        self.assertEqual(math.ulp(0.0),
                         sys.float_info.min * sys.float_info.epsilon)
        self.assertEqual(math.ulp(FLOAT_MAX),
                         FLOAT_MAX - math.nextafter(FLOAT_MAX, -INF))

        # special cases
        self.assertEqual(math.ulp(INF), INF)
        self.assertIsNaN(math.ulp(math.nan))

        # negative number: ulp(-x) == ulp(x)
        for x in (0.0, 1.0, 2 ** 52, 2 ** 64, INF):
            with self.subTest(x=x):
                self.assertEqual(math.ulp(-x), math.ulp(x))
def test_issue39871(self):
# A SystemError should not be raised if the first arg to atan2(),
# copysign(), or remainder() cannot be converted to a float.
class F:
def __float__(self):
self.converted = True
1/0
for func in math.atan2, math.copysign, math.remainder:
y = F()
with self.assertRaises(TypeError):
func("not a number", y)
# There should not have been any attempt to convert the second
# argument to a float.
self.assertFalse(getattr(y, "converted", False))
# Custom assertions.
def assertIsNaN(self, value):
if not math.isnan(value):
self.fail("Expected a NaN, got {!r}.".format(value))
def assertEqualSign(self, x, y):
"""Similar to assertEqual(), but compare also the sign with copysign().
Function useful to compare signed zeros.
"""
self.assertEqual(x, y)
self.assertEqual(math.copysign(1.0, x), math.copysign(1.0, y))
class IsCloseTests(unittest.TestCase):
    """Tests for math.isclose() (PEP 485 approximate equality)."""
    isclose = math.isclose  # subclasses should override this

    def assertIsClose(self, a, b, *args, **kwargs):
        """Fail unless a and b compare close under self.isclose()."""
        self.assertTrue(self.isclose(a, b, *args, **kwargs),
                        msg="%s and %s should be close!" % (a, b))

    def assertIsNotClose(self, a, b, *args, **kwargs):
        """Fail if a and b compare close under self.isclose()."""
        self.assertFalse(self.isclose(a, b, *args, **kwargs),
                         msg="%s and %s should not be close!" % (a, b))

    def assertAllClose(self, examples, *args, **kwargs):
        """Assert that every (a, b) pair in *examples* is close."""
        for a, b in examples:
            self.assertIsClose(a, b, *args, **kwargs)

    def assertAllNotClose(self, examples, *args, **kwargs):
        """Assert that no (a, b) pair in *examples* is close."""
        for a, b in examples:
            self.assertIsNotClose(a, b, *args, **kwargs)

    def test_negative_tolerances(self):
        # ValueError should be raised if either tolerance is less than zero
        with self.assertRaises(ValueError):
            self.assertIsClose(1, 1, rel_tol=-1e-100)
        with self.assertRaises(ValueError):
            self.assertIsClose(1, 1, rel_tol=1e-100, abs_tol=-1e10)

    def test_identical(self):
        # identical values must test as close
        identical_examples = [(2.0, 2.0),
                              (0.1e200, 0.1e200),
                              (1.123e-300, 1.123e-300),
                              (12345, 12345.0),
                              (0.0, -0.0),
                              (345678, 345678)]
        self.assertAllClose(identical_examples, rel_tol=0.0, abs_tol=0.0)

    def test_eight_decimal_places(self):
        # examples that are close to 1e-8, but not 1e-9
        eight_decimal_places_examples = [(1e8, 1e8 + 1),
                                         (-1e-8, -1.000000009e-8),
                                         (1.12345678, 1.12345679)]
        self.assertAllClose(eight_decimal_places_examples, rel_tol=1e-8)
        self.assertAllNotClose(eight_decimal_places_examples, rel_tol=1e-9)

    def test_near_zero(self):
        # values close to zero
        near_zero_examples = [(1e-9, 0.0),
                              (-1e-9, 0.0),
                              (-1e-150, 0.0)]
        # these should not be close to any rel_tol
        self.assertAllNotClose(near_zero_examples, rel_tol=0.9)
        # these should be close to abs_tol=1e-8
        self.assertAllClose(near_zero_examples, abs_tol=1e-8)

    def test_identical_infinite(self):
        # these are close regardless of tolerance -- i.e. they are equal
        self.assertIsClose(INF, INF)
        self.assertIsClose(INF, INF, abs_tol=0.0)
        self.assertIsClose(NINF, NINF)
        self.assertIsClose(NINF, NINF, abs_tol=0.0)

    def test_inf_ninf_nan(self):
        # these should never be close (following IEEE 754 rules for equality)
        not_close_examples = [(NAN, NAN),
                              (NAN, 1e-100),
                              (1e-100, NAN),
                              (INF, NAN),
                              (NAN, INF),
                              (INF, NINF),
                              (INF, 1.0),
                              (1.0, INF),
                              (INF, 1e308),
                              (1e308, INF)]
        # use largest reasonable tolerance
        self.assertAllNotClose(not_close_examples, abs_tol=0.999999999999999)

    def test_zero_tolerance(self):
        # test with zero tolerance
        zero_tolerance_close_examples = [(1.0, 1.0),
                                         (-3.4, -3.4),
                                         (-1e-300, -1e-300)]
        self.assertAllClose(zero_tolerance_close_examples, rel_tol=0.0)

        zero_tolerance_not_close_examples = [(1.0, 1.000000000000001),
                                             (0.99999999999999, 1.0),
                                             (1.0e200, .999999999999999e200)]
        self.assertAllNotClose(zero_tolerance_not_close_examples, rel_tol=0.0)

    def test_asymmetry(self):
        # test the asymmetry example from PEP 485
        self.assertAllClose([(9, 10), (10, 9)], rel_tol=0.1)

    def test_integers(self):
        # test with integer values
        integer_examples = [(100000001, 100000000),
                            (123456789, 123456788)]

        self.assertAllClose(integer_examples, rel_tol=1e-8)
        self.assertAllNotClose(integer_examples, rel_tol=1e-9)

    def test_decimals(self):
        # test with Decimal values
        from decimal import Decimal

        decimal_examples = [(Decimal('1.00000001'), Decimal('1.0')),
                            (Decimal('1.00000001e-20'), Decimal('1.0e-20')),
                            (Decimal('1.00000001e-100'), Decimal('1.0e-100')),
                            (Decimal('1.00000001e20'), Decimal('1.0e20'))]
        self.assertAllClose(decimal_examples, rel_tol=1e-8)
        self.assertAllNotClose(decimal_examples, rel_tol=1e-9)

    def test_fractions(self):
        # test with Fraction values
        from fractions import Fraction

        fraction_examples = [
            (Fraction(1, 100000000) + 1, Fraction(1)),
            (Fraction(100000001), Fraction(100000000)),
            (Fraction(10**8 + 1, 10**28), Fraction(1, 10**20))]
        self.assertAllClose(fraction_examples, rel_tol=1e-8)
        self.assertAllNotClose(fraction_examples, rel_tol=1e-9)
def test_main():
    """Build and run the full math test suite, including the ieee754 doctests."""
    from doctest import DocFileSuite
    # unittest.makeSuite() is deprecated and removed in Python 3.13; build
    # the suites through a TestLoader instead (same behavior).
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(MathTests))
    suite.addTest(loader.loadTestsFromTestCase(IsCloseTests))
    suite.addTest(DocFileSuite("ieee754.txt"))
    run_unittest(suite)

if __name__ == '__main__':
    test_main()
| 41.739053
| 98
| 0.568342
|
7a9a8e0f955e87c6b580379552976c8e04e01719
| 21,004
|
py
|
Python
|
sfcollector/solidfire_graphite_collector.py
|
scaleoutsean/hcicollector
|
e30652c0e72f46937f049c7e44e3f8d5516a7b40
|
[
"Apache-2.0"
] | 3
|
2020-01-30T19:56:27.000Z
|
2020-06-09T10:50:53.000Z
|
sfcollector/solidfire_graphite_collector.py
|
scaleoutsean/hcicollector
|
e30652c0e72f46937f049c7e44e3f8d5516a7b40
|
[
"Apache-2.0"
] | 15
|
2020-02-04T14:09:05.000Z
|
2021-12-26T08:52:53.000Z
|
sfcollector/solidfire_graphite_collector.py
|
scaleoutsean/hcicollector
|
e30652c0e72f46937f049c7e44e3f8d5516a7b40
|
[
"Apache-2.0"
] | 1
|
2020-02-20T04:21:28.000Z
|
2020-02-20T04:21:28.000Z
|
#!/usr/bin/env python
# solidfire_graphite_collector.py
#
# Version 1.0.5
#
# Original authors: Colin Bieberstein, Aaron Patten
# Contributors: Pablo Luis Zorzoli, Davide Obbi, scaleoutSean
#
# Copyright (c) 2020 NetApp, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import graphyte
from solidfire.factory import ElementFactory
import solidfire.common
import logging
def send_cluster_faults(sfe, prefix):
    """Report active cluster fault counts, grouped by severity.

    Best-practice faults are excluded and only currently-active faults are
    counted.  Counts go to Graphite when enabled, otherwise to the log.
    """
    faults = sfe.list_cluster_faults(False, "current").to_json()['faults']
    # Seed the three known severities at zero so each is always reported,
    # even when no fault of that severity is active.
    counts = {'critical': 0, 'error': 0, 'warning': 0}
    for fault in faults:
        severity = fault['severity']
        counts[severity] = counts.get(severity, 0) + 1
    for severity, count in counts.items():
        if to_graphite:
            graphyte.send(prefix + '.fault.' + severity, to_num(count))
        else:
            LOG.warning(prefix + '.fault.' + severity + ' ' + str(count))
def send_cluster_stats(sfe, prefix):
    """Send a subset of the GetClusterStats API results to Graphite.

    Additionally emits 'clusterUtilizationScaled', which at present is the
    raw clusterUtilization value converted to float.
    """
    metrics = ['clientQueueDepth', 'clusterUtilization', 'readOpsLastSample',
               'readBytesLastSample', 'writeOpsLastSample', 'writeBytesLastSample',
               'actualIOPS', 'latencyUSec', 'normalizedIOPS', 'readBytes',
               'readLatencyUSec', 'readOps', 'unalignedReads', 'unalignedWrites',
               'writeLatencyUSec', 'writeOps', 'writeBytes']
    stats = sfe.get_cluster_stats().to_json()['clusterStats']
    # NOTE(review): no scaling is actually applied here -- the "scaled" metric
    # is just float(clusterUtilization); presumably a scaling step was planned.
    utilization_scaled = float(stats['clusterUtilization'])
    if to_graphite:
        graphyte.send(prefix + '.clusterUtilizationScaled', to_num(utilization_scaled))
    else:
        LOG.warning(prefix + '.clusterUtilizationScaled ' + str(utilization_scaled))
    for metric in metrics:
        if to_graphite:
            graphyte.send(prefix + '.' + metric, to_num(stats[metric]))
        else:
            LOG.warning(prefix + '.' + metric + ' ' + str(stats[metric]))
def send_cluster_capacity(sfe, prefix):
    """
    send a subset of GetClusterCapacity API call results and derived metrics to graphite.

    Derived metrics: thin provisioning factor, deduplication factor,
    compression factor and the combined efficiency factors.  In debug
    mode (to_graphite False) everything is written to the log instead.
    """
    metrics = ['activeBlockSpace', 'activeSessions', 'averageIOPS',
               'clusterRecentIOSize', 'currentIOPS', 'maxIOPS',
               'maxOverProvisionableSpace', 'maxProvisionedSpace',
               'maxUsedMetadataSpace', 'maxUsedSpace', 'nonZeroBlocks',
               'peakActiveSessions', 'peakIOPS', 'provisionedSpace',
               'snapshotNonZeroBlocks', 'totalOps', 'uniqueBlocks',
               'uniqueBlocksUsedSpace', 'usedMetadataSpace',
               'usedMetadataSpaceInSnapshots', 'usedSpace', 'zeroBlocks']
    result = sfe.get_cluster_capacity().to_json()['clusterCapacity']
    for key in metrics:
        if to_graphite:
            graphyte.send(prefix + '.' + key, to_num(result[key]))
        else:
            LOG.warning(prefix + '.' + key + ' ' + str(result[key]))
    # Calculate & send derived metrics
    non_zero_blocks = to_num(result['nonZeroBlocks'])
    zero_blocks = to_num(result['zeroBlocks'])
    unique_blocks = to_num(result['uniqueBlocks'])
    unique_blocks_used_space = to_num(result['uniqueBlocksUsedSpace'])
    snapshot_non_zero_blocks = to_num(result['snapshotNonZeroBlocks'])
    # Thin provisioning: provisioned blocks / actually written blocks.
    # Guard against division by zero on an empty cluster (factor = 1).
    if non_zero_blocks != 0:
        thin_factor = float((non_zero_blocks + zero_blocks)) / float(non_zero_blocks)
    else:
        thin_factor = 1
    if to_graphite:
        graphyte.send(prefix + '.thin_factor', to_num(thin_factor))
    else:
        LOG.warning(prefix + '.thin_factor ' + str(thin_factor))
    if unique_blocks != 0:
        # cluster with volumes with snapshots can have dedupe ratio < 1
        dedupe_factor = float(non_zero_blocks + snapshot_non_zero_blocks) / float(unique_blocks)
    else:
        dedupe_factor = 1
    if to_graphite:
        graphyte.send(prefix + '.dedupe_factor', to_num(dedupe_factor))
    else:
        LOG.warning(prefix + '.dedupe_factor ' + str(dedupe_factor))
    if unique_blocks_used_space != 0:
        # 4096-byte blocks vs bytes actually stored.
        # NOTE(review): the 0.93 multiplier presumably discounts on-disk
        # metadata overhead — confirm against vendor guidance.
        compression_factor = (unique_blocks * 4096.0) / (unique_blocks_used_space * .93)
    else:
        compression_factor = 1
    if to_graphite:
        graphyte.send(prefix + '.compression_factor', to_num(compression_factor))
    else:
        LOG.warning(prefix + '.compression_factor ' + str(compression_factor))
    # Combined efficiency: CxD (compression x dedupe) and full (x thin).
    efficiency_cxd_factor = dedupe_factor * compression_factor
    efficiency_factor = thin_factor * efficiency_cxd_factor
    if to_graphite:
        graphyte.send(prefix + '.efficiency_factor', to_num(efficiency_factor))
        # w/o Thin Provisioning
        graphyte.send(prefix + '.efficiency_cxd_factor', to_num(efficiency_cxd_factor))
    else:
        LOG.warning(prefix + '.efficiency_factor ' + str(efficiency_factor))
        LOG.warning(prefix + '.efficiency_cxd_factor ' + str(efficiency_cxd_factor))
def send_volume_stats(sfe, prefix):
    """
    Send a subset of ListVolumeStatsByVolume results to graphite.

    Metrics are emitted twice per volume: once under the owning account's
    username and once under the bare volume ID.
    Note: Calls ListVolumes to get volume names for use in the metric
    path, and GetAccountByID to resolve each account's username.
    """
    metrics_list = ['actualIOPS', 'averageIOPSize', 'burstIOPSCredit',
                    'clientQueueDepth', 'latencyUSec', 'nonZeroBlocks', 'readBytes',
                    'readBytesLastSample', 'readLatencyUSec', 'readOps',
                    'readOpsLastSample', 'throttle', 'unalignedReads', 'unalignedWrites',
                    'volumeSize', 'volumeUtilization', 'writeBytes', 'writeBytesLastSample',
                    'writeLatencyUSec', 'writeOps', 'writeOpsLastSample', 'zeroBlocks']
    volume_list = sfe.list_volumes(include_virtual_volumes=False).to_json()['volumes']
    volinfo_by_id = list_to_dict(volume_list, key="volumeID")
    volstats = sfe.list_volume_stats_by_volume(include_virtual_volumes=False).to_json()['volumeStats']
    for vs_dict in volstats:
        vol_info = volinfo_by_id[vs_dict['volumeID']]
        vol_name = vol_info['name']
        vol_id = vol_info['volumeID']
        # Fixed: the original assigned vol_accountID twice in a row.
        vol_accountID = vol_info['accountID']
        vol_accountName = sfe.get_account_by_id(vol_accountID).to_json()['account']['username']
        for key in metrics_list:
            if to_graphite:
                graphyte.send(prefix + '.accountID.' + str(vol_accountName) +
                              '.volume.' + str(vol_name) + '.' + key, to_num(vs_dict[key]))
                graphyte.send(prefix + '.volumeID.' + str(vol_id) + '.' + key, to_num(vs_dict[key]))
            else:
                LOG.warning(prefix + '.accountID.' + str(vol_accountName) +
                            '.volume.' + str(vol_name) + '.' + key + ' ' + str(vs_dict[key]))
                # Fixed: debug output now mirrors the graphite metric path;
                # it previously logged '.volumeID.<accountName>.volumeID.<id>'.
                LOG.warning(prefix + '.volumeID.' + str(vol_id) + '.' + key + ' ' + str(vs_dict[key]))
def send_volume_histogram_stats(sfe, prefix):
    """
    Send volume QoS histogram stats. Requires API v11 or above.

    Note: as of August 2020, this API method is not well documented so
    stuff may not mean what we think it means.
    """
    # Fixed: 'minToMaxIopsPercentages' was listed twice in the original,
    # which sent that histogram twice for every volume.
    hmetrics = ['belowMinIopsPercentages', 'minToMaxIopsPercentages',
                'readBlockSizes', 'throttlePercentages', 'writeBlockSizes']
    qosh = sfe.invoke_sfapi("ListVolumeQoSHistograms", parameters=None)
    for histogram_entry in qosh['qosHistograms']:
        vol_path = prefix + '.volumeID.' + str(histogram_entry['volumeID'])
        for metric in hmetrics:
            # Each histogram is a mapping of bucket name -> sample count.
            for key, value in histogram_entry['histograms'][metric].items():
                if to_graphite:
                    graphyte.send(vol_path + '.' + metric + '.' + key, int(value))
                else:
                    LOG.warning(vol_path + '.' + metric + '.' + key + ' ' + str(value))
def send_drive_stats(sfe, prefix):
    """
    Summarise drives by status and by type, at cluster and node level,
    and ship the counts to graphite (or the log in debug mode).

    Calls ListDrives and ListAllNodes.
    """
    statuses = ['active', 'available', 'erasing', 'failed', 'removing']
    drive_types = ['volume', 'block', 'unknown']
    drives = sfe.list_drives().to_json()['drives']
    # Cluster-wide counts
    for status in statuses:
        count = count_if(drives, 'status', status)
        if to_graphite:
            graphyte.send(prefix + '.drives.status.' + status, to_num(count))
        else:
            LOG.warning(prefix + '.drives.status.' + status + ' ' + str(count))
    for drive_type in drive_types:
        count = count_if(drives, 'type', drive_type)
        if to_graphite:
            graphyte.send(prefix + '.drives.type.' + drive_type, to_num(count))
        else:
            LOG.warning(prefix + '.drives.type.' + drive_type + ' ' + str(count))
    # Per-node counts
    nodes_by_id = list_to_dict(sfe.list_all_nodes().to_json()['nodes'], key="nodeID")
    for node_id, node_info in nodes_by_id.items():
        node_prefix = prefix + '.node.' + node_info['name']
        for status in statuses:
            count = count_ifs(drives, 'status', status, 'nodeID', node_id)
            if to_graphite:
                graphyte.send(node_prefix + '.drives.status.' + status, to_num(count))
            else:
                LOG.warning(node_prefix + '.drives.status.' + status + ' ' + str(count))
        for drive_type in drive_types:
            count = count_ifs(drives, 'type', drive_type, 'nodeID', node_id)
            if to_graphite:
                graphyte.send(node_prefix + '.drives.type.' + drive_type, to_num(count))
            else:
                LOG.warning(node_prefix + '.drives.type.' + drive_type + ' ' + str(count))
def send_ssd_stats(sfe, prefix):
    """
    Send per-drive wear level (and active iSCSI session count when the
    drive reports one) from ListDriveStats results to graphite.

    We could store them under node name, but that doesn't seem necessary.
    Note: driveID from ListDriveStats is used in the metric path.
    """
    drive_stats = sfe.list_drive_stats().to_json()['driveStats']
    for stats in drive_stats:
        drive_prefix = prefix + '.drives.' + str(stats['driveID'])
        life_pct = stats['lifeRemainingPercent']
        # Not every drive reports activeSessions; original duplicated the
        # lifeRemainingPercent branches for the two cases — collapsed here.
        sessions = stats.get('activeSessions')
        if to_graphite:
            if sessions is not None:
                graphyte.send(drive_prefix + '.sessions', int(sessions))
            graphyte.send(drive_prefix + '.lifeRemainingPercent', int(life_pct))
        else:
            if sessions is not None:
                LOG.warning(drive_prefix + '.sessions ' + str(sessions))
            LOG.warning(drive_prefix + '.lifeRemainingPercent ' + str(life_pct))
def send_elem_version(sfe, prefix):
    """
    Report the highest API version supported by the current Element
    cluster (GetClusterVersionInfo's clusterAPIVersion field).
    """
    version = sfe.get_cluster_version_info().to_json()['clusterAPIVersion']
    if to_graphite:
        graphyte.send(prefix + '.version', float(version))
    else:
        LOG.warning(prefix + '.version ' + str(version))
def send_acc_eff(sfe, prefix):
    """
    Send the CxD (compression x deduplication, i.e. without thin
    provisioning) efficiency for every account that owns at least one
    volume.
    """
    accounts = sfe.list_accounts().to_json()['accounts']
    accounts_with_volumes = [acct['accountID']
                             for acct in accounts if len(acct['volumes']) > 0]
    for account_id in accounts_with_volumes:
        eff_info = sfe.get_account_efficiency(account_id).to_json()
        account_eff = round(eff_info['compression'] * eff_info['deduplication'], 2)
        metric_path = prefix + '.accountID.' + str(account_id) + '.accountEfficiency'
        if to_graphite:
            graphyte.send(metric_path, float(account_eff))
        else:
            LOG.warning(metric_path + ' ' + str(account_eff))
def send_vol_efficiency(sfe, prefix):
    """
    Send per-volume efficiency info (dedupe & compression aka CxD only).
    Can be used to identify low-efficiency volumes (e.g. < 1.5x).

    Accounts without volumes are logged but produce no metric.
    """
    results = sfe.list_accounts().to_json()
    for account in results['accounts']:
        volumes = sfe.list_volumes(accounts=[account['accountID']]).to_json()['volumes']
        if len(volumes) > 0:
            for volume in volumes:
                vol_eff_d = sfe.get_volume_efficiency(volume['volumeID']).to_json()
                vol_eff = round((vol_eff_d['deduplication'] * vol_eff_d['compression']), 2)
                metric_path = prefix + '.volumeID.' + str(volume['volumeID']) + '.volumeEfficiency'
                if to_graphite:
                    graphyte.send(metric_path, float(vol_eff))
                else:
                    # Fixed: removed a stray print() that duplicated this
                    # line to stdout; all other senders log via LOG only.
                    LOG.warning(metric_path + ' ' + str(vol_eff))
        else:
            LOG.warning(prefix + ': account ID ' + str(account['accountID']) + ' has no volumes')
def send_conn_stats(sfe, prefix):
    """
    Send iSCSI connection stats to graphite: the number of active
    sessions and the number of distinct target IPs.

    Calls ListISCSISessions.
    """
    sessions = sfe.list_iscsisessions().to_json()['sessions']
    # Distinct target IPs; strip the :port suffix before deduplicating.
    # Fixed: the original also accumulated initiator IPs into a list that
    # was never used — dead work removed.
    targets = {session['targetIP'].split(':')[0] for session in sessions}
    if to_graphite:
        graphyte.send(prefix + '.iscsiActiveSessionCount', len(sessions))
        graphyte.send(prefix + '.iscsiTargetCount', len(targets))
    else:
        LOG.warning(prefix + '.iscsiActiveSessionCount ' + str(len(sessions)))
        LOG.warning(prefix + '.iscsiTargetCount ' + str(len(targets)))
def send_node_stats(sfe, prefix):
    """
    Ship a subset of ListNodeStats results, one metric path per node.

    Note: Calls ListAllNodes so the node name can be used in the path.
    """
    keys = ['cpu', 'usedMemory', 'networkUtilizationStorage',
            'networkUtilizationCluster', 'cBytesOut', 'cBytesIn', 'sBytesOut',
            'sBytesIn', 'mBytesOut', 'mBytesIn', 'readOps', 'writeOps']
    nodes_by_id = list_to_dict(sfe.list_all_nodes().to_json()['nodes'], key="nodeID")
    for node_stats in sfe.list_node_stats().to_json()['nodeStats']['nodes']:
        node_prefix = prefix + '.node.' + nodes_by_id[node_stats['nodeID']]['name']
        for key in keys:
            if to_graphite:
                graphyte.send(node_prefix + '.' + key, to_num(node_stats[key]))
            else:
                LOG.warning(node_prefix + '.' + key + ' ' + str(node_stats[key]))
def list_to_dict(list_of_dicts, key):
    """
    Pivot a list of dicts into a dict of dicts keyed on ``key``.

    Each value is a copy of the original dict with an extra ``index``
    entry recording its position in the input list.
    """
    pivoted = {}
    for position, record in enumerate(list_of_dicts):
        pivoted[record[key]] = dict(record, index=position)
    return pivoted
def count_if(my_list, key, value):
    """Return how many dicts in ``my_list`` have ``key`` equal to ``value``."""
    return sum(1 for record in my_list if record.get(key) == value)
def count_ifs(my_list, key, value, key2, value2, *extra_pairs):
    """
    Return the number of dicts in ``my_list`` matching every key==value pair.

    The first two pairs stay positional for backward compatibility;
    additional pairs may be appended flat (implements the original TODO),
    e.g. ``count_ifs(rows, 'a', 1, 'b', 2, 'c', 3)``.

    :raises ValueError: if ``extra_pairs`` has an odd number of elements
    """
    if len(extra_pairs) % 2:
        raise ValueError("extra key/value arguments must come in pairs")
    pairs = [(key, value), (key2, value2)]
    pairs.extend((extra_pairs[i], extra_pairs[i + 1])
                 for i in range(0, len(extra_pairs), 2))
    return sum(1 for item in my_list
               if all(item.get(k) == v for k, v in pairs))
def to_num(metric):
    """
    Convert ``metric`` to a float, returning NaN when it cannot be parsed.

    Fixed: the original retried the identical ``float(metric)`` call in a
    nested try/except and used ``finally: return``, which can mask
    exceptions raised in the handler.  One attempt is sufficient.
    """
    try:
        return float(metric)
    except ValueError:
        return float('NaN')
# Parse commandline arguments
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--solidfire',
                    help='MVIP or FQDN of SolidFire cluster from which metrics should be collected')
parser.add_argument('-u', '--username', default='admin',
                    help='username for SolidFire array. Default: admin (NOTE: consider using a dedicated reporting admin account)')
parser.add_argument('-p', '--password', default='password',
                    help='password for admin account on SolidFire cluster. Default: password')
parser.add_argument('-o', '--timeout', type=int, default=10,
                    help='timeout for SolidFire Collector to connect to SolidFire API. Default: 10 (seconds)')
parser.add_argument('-a', '--apitimeout', type=int, default=20,
                    help='timeout for SolidFire Collector to get response from the SolidFire API endpoint. Default: 20 (seconds)')
# NOTE(review): this option has no type=; any non-empty string passed on
# the command line (including "False") is truthy — confirm intended.
parser.add_argument('-c', '--validatecert', default=False,
                    help='Validate SF TLS certificate. Default: False (allow self-signed). For "True", --solidfire must use FQDN')
parser.add_argument('-g', '--graphite', default='localhost',
                    help='hostname of Graphite server to send to. Default: localhost. (NOTE: "debug" sends metrics to logfile)')
parser.add_argument('-t', '--port', type=int, default=2003,
                    help='port to send message to. Default: 2003. if the --graphite is set to debug, can be omitted')
parser.add_argument('-v', '--version', default="11.0",
                    help='Element API version. Default: 11.0. Version must be supported by SolidFire Python SDK in sfcollector')
parser.add_argument('-m', '--metricroot', default='netapp.solidfire.cluster',
                    help='Graphite metric root for sfcollector. Default: netapp.solidfire.cluster')
parser.add_argument('-l', '--logfile',
                    help='logfile. Default: none. Required if Graphite hostname is "debug" and metrics sent to logfile')
args = parser.parse_args()
# to_graphite toggles every sender between graphyte.send and LOG.warning.
to_graphite = True
# Logger module configuration
LOG = logging.getLogger('solidfire_graphite_collector.py')
if args.logfile:
    logging.basicConfig(filename=args.logfile, level=logging.DEBUG, format='%(asctime)s %(message)s')
    LOG.warning("Starting Collector script as a daemon. No console output possible.")
else:
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
# Initialize graphyte sender
if args.graphite == "debug":
    LOG.warning("Starting collector in debug mode. All the metrics will be shipped to logfile")
    to_graphite = False
else:
    graphyte.init(args.graphite, port=args.port, prefix=args.metricroot)
LOG.info("Metrics Collection for array: {0}".format(args.solidfire))
try:
    sfe = ElementFactory.create(args.solidfire, args.username, args.password, args.version, verify_ssl=args.validatecert, print_ascii_art=False)
    # There are two kinds of timeouts (one is for individual API requests)
    # https://github.com/solidfire/solidfire-sdk-python/pull/39/files
    sfe.timeout(args.apitimeout)
    sfe.connect_timeout(args.timeout)
except solidfire.common.ApiServerError as e:
    LOG.warning("ApiServerError: {0}".format(str(e)))
    sfe = None
except Exception as e:
    LOG.warning("General Exception: {0}".format(str(e)))
    sfe = None
# Collect and ship all metric groups for this cluster.
# NOTE(review): if the connection above failed, sfe is None and the
# AttributeError raised below is swallowed by the broad except — the run
# ends with only a log line.
try:
    cluster_name = sfe.get_cluster_info().to_json()['clusterInfo']['name']
    send_cluster_faults(sfe, cluster_name)
    send_cluster_stats(sfe, cluster_name)
    send_cluster_capacity(sfe, cluster_name)
    send_volume_stats(sfe, cluster_name)
    send_volume_histogram_stats(sfe, cluster_name)
    send_drive_stats(sfe, cluster_name)
    send_ssd_stats(sfe, cluster_name)
    send_acc_eff(sfe, cluster_name)
    send_elem_version(sfe, cluster_name)
    send_vol_efficiency(sfe, cluster_name)
    send_node_stats(sfe, cluster_name)
    send_conn_stats(sfe, cluster_name)
except solidfire.common.ApiServerError as e:
    LOG.warning("ApiServerError: {0}".format(str(e)))
except Exception as e:
    LOG.warning("General Exception: {0}".format(str(e)))
    sfe = None
sfe = None
| 43.849687
| 144
| 0.646305
|
a8559d0c21545bf4f541239a0f19c839fc2241fc
| 3,133
|
py
|
Python
|
MyFirstCodeProjects_GH/recursion_base/anagram.py
|
MingChuan09/MyFirstCodeProjects
|
b8cb844c1471d070c0ed7df627ac71e10e4e013a
|
[
"MIT"
] | null | null | null |
MyFirstCodeProjects_GH/recursion_base/anagram.py
|
MingChuan09/MyFirstCodeProjects
|
b8cb844c1471d070c0ed7df627ac71e10e4e013a
|
[
"MIT"
] | null | null | null |
MyFirstCodeProjects_GH/recursion_base/anagram.py
|
MingChuan09/MyFirstCodeProjects
|
b8cb844c1471d070c0ed7df627ac71e10e4e013a
|
[
"MIT"
] | null | null | null |
"""
File: anagram.py
Name: 陳名娟 Jenny Chen
----------------------------------
This program recursively finds all the anagram(s)
for the word input by user and terminates when the
input string matches the EXIT constant defined
at line 19
If you correctly implement this program, you should see the
number of anagrams for each word listed below:
* arm -> 3 anagrams
* contains -> 5 anagrams
* stop -> 6 anagrams
* tesla -> 10 anagrams
* spear -> 12 anagrams
"""
import time # This file allows you to calculate the speed of your algorithm
# Constants
FILE = 'dictionary.txt' # This is the filename of an English dictionary
EXIT = '-1' # Controls when to stop the loop
# global variable
dictionary = []
def main():
    """
    Repeatedly prompt the user for a word and list its anagrams, until
    the EXIT sentinel is entered.  Reports the total elapsed time.
    """
    started = time.time()
    ####################
    print(f'Welcome to stanCode \"Anagram Generator\" (or {EXIT} to quit)')
    read_dictionary()
    while True:
        word = input('Find anagram for: ')
        if word == EXIT:
            break
        find_anagrams(word)
    ####################
    end = time.time()
    print('----------------------------------')
    print(f'The speed of your anagram algorithm: {end-started} seconds.')
def read_dictionary():
    """Load every word from FILE into the global ``dictionary`` list."""
    with open(FILE, 'r') as dict_file:
        dictionary.extend(line.strip() for line in dict_file)
def find_anagrams(s):
    """
    Find and print every dictionary anagram of the given word.

    :param s: str, the word entered by the user
    """
    found = []
    counter = [0]
    find_anagrams_helper(s, '', found, counter, [])
    print('Searching...')
    print(str(counter[0]) + ' anagrams: ' + str(found))
def find_anagrams_helper(s, current_str, ans_list, count, index_list):
    """
    Recursive backtracking: build permutations of the letters of ``s``
    and record those that are dictionary words.

    :param s: str, the word whose anagrams are being searched
    :param current_str: str, the permutation built so far
    :param ans_list: list, accumulates the anagrams found
    :param count: list, count[0] holds the number of anagrams found
    :param index_list: list, indices of s already used in current_str
    """
    if len(current_str) == len(s):
        if current_str in dictionary:
            if current_str not in ans_list:
                ans_list.append(current_str)
                count[0] += 1
                print('Searching...')
                print(f'Find: {current_str}')
    else:
        for i in range(len(s)):
            alpha = s[i]
            if i not in index_list:  # track indices so every letter (including duplicates) is considered once
                # choose
                index_list.append(i)
                current_str += alpha
                # explore
                if has_prefix(current_str):  # only recurse while some dictionary word starts with this prefix
                    find_anagrams_helper(s, current_str, ans_list, count, index_list)
                # un-choose
                index_list.pop()
                current_str = current_str[0:len(current_str)-1]
def has_prefix(sub_s):
    """
    :param sub_s: str, a partial arrangement of letters
    :return: bool, True if any dictionary word starts with ``sub_s``
    """
    return any(word.startswith(sub_s) for word in dictionary)
# Standard script entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
| 28.225225
| 93
| 0.569103
|
081a337b911ad9aea657fd0ce8b7af0664fd1d70
| 546
|
py
|
Python
|
convertmask/utils/backup/getArea.py
|
wwdok/mask2json
|
403c6b3df677185d5951239d13187b55bda6465a
|
[
"Apache-2.0"
] | 27
|
2020-06-11T01:47:57.000Z
|
2022-03-18T01:47:59.000Z
|
convertmask/utils/backup/getArea.py
|
wwdok/mask2json
|
403c6b3df677185d5951239d13187b55bda6465a
|
[
"Apache-2.0"
] | 14
|
2020-06-11T01:50:41.000Z
|
2022-03-18T13:11:04.000Z
|
convertmask/utils/backup/getArea.py
|
wwdok/mask2json
|
403c6b3df677185d5951239d13187b55bda6465a
|
[
"Apache-2.0"
] | 5
|
2020-10-21T01:51:59.000Z
|
2022-02-28T08:27:32.000Z
|
'''
lanhuage: python
Descripttion:
version: beta
Author: xiaoshuyui
Date: 2020-08-17 13:48:46
LastEditors: xiaoshuyui
LastEditTime: 2020-10-10 15:41:28
'''
def getAreaOfPolyGonbyVector(points):
    """
    Compute the area of a simple polygon via the shoelace (cross-product)
    formula.

    :param points: sequence of vertices in cv2-contour style nesting,
        i.e. points[i][0] == (x, y).  The polygon does not need to repeat
        the first vertex at the end; the closing edge is handled here
        (and a repeated first vertex contributes a zero term, so both
        conventions work).
    :return: float, absolute area of the polygon
    :raises Exception: if fewer than 3 vertices are supplied
    """
    if len(points) < 3:
        raise Exception("points is not enough to calculate area!")
    area = 0
    n = len(points)
    # Fixed: the original loop stopped at the last vertex and omitted the
    # wrap-around edge (last -> first), giving wrong areas for polygons
    # not anchored at the origin.
    for i in range(n):
        p1 = points[i]
        p2 = points[(i + 1) % n]
        triArea = (p1[0][0] * p2[0][1] - p2[0][0] * p1[0][1]) / 2
        area += triArea
    return abs(area)
| 21
| 66
| 0.591575
|
4d3a9d936d0aba674062add9184bb01e46ae9e86
| 838
|
py
|
Python
|
admin/products/migrations/0001_initial.py
|
Branbados/microservices-application
|
3166defbb3713464138cb936d318de94819bd8f1
|
[
"MIT"
] | null | null | null |
admin/products/migrations/0001_initial.py
|
Branbados/microservices-application
|
3166defbb3713464138cb936d318de94819bd8f1
|
[
"MIT"
] | null | null | null |
admin/products/migrations/0001_initial.py
|
Branbados/microservices-application
|
3166defbb3713464138cb936d318de94819bd8f1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2021-07-08 23:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial (auto-generated) migration for the products app: creates the
    # Product and User tables.  NOTE: Django migrations are append-only
    # history — do not edit operations once applied anywhere.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('image', models.CharField(max_length=200)),
                ('likes', models.PositiveBigIntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
    ]
| 27.933333
| 114
| 0.557279
|
e06b82f2eb0926082dc5b212d2b791ef845898d4
| 54,080
|
py
|
Python
|
fsspec/spec.py
|
d4l3k/filesystem_spec
|
bd0f7d2442e1645a696a8ebd6e56c5ebdc8cb960
|
[
"BSD-3-Clause"
] | 1
|
2021-11-19T11:39:54.000Z
|
2021-11-19T11:39:54.000Z
|
fsspec/spec.py
|
d4l3k/filesystem_spec
|
bd0f7d2442e1645a696a8ebd6e56c5ebdc8cb960
|
[
"BSD-3-Clause"
] | null | null | null |
fsspec/spec.py
|
d4l3k/filesystem_spec
|
bd0f7d2442e1645a696a8ebd6e56c5ebdc8cb960
|
[
"BSD-3-Clause"
] | null | null | null |
import io
import logging
import os
import threading
import warnings
import weakref
from distutils.version import LooseVersion
from errno import ESPIPE
from glob import has_magic
from hashlib import sha256
from .callbacks import _DEFAULT_CALLBACK
from .config import apply_config, conf
from .dircache import DirCache
from .transaction import Transaction
from .utils import (
get_package_version_without_import,
other_paths,
read_block,
stringify_path,
tokenize,
)
logger = logging.getLogger("fsspec")
def make_instance(cls, args, kwargs):
    """Instantiate ``cls(*args, **kwargs)``; used as the pickle constructor."""
    instance = cls(*args, **kwargs)
    return instance
class _Cached(type):
    """
    Metaclass for caching file system instances.
    Notes
    -----
    Instances are cached according to
    * The values of the class attributes listed in `_extra_tokenize_attributes`
    * The arguments passed to ``__init__``.
    This creates an additional reference to the filesystem, which prevents the
    filesystem from being garbage collected when all *user* references go away.
    A call to the :meth:`AbstractFileSystem.clear_instance_cache` must *also*
    be made for a filesystem instance to be garbage collected.
    """
    def __init__(cls, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Note: we intentionally create a reference here, to avoid garbage
        # collecting instances when all other references are gone. To really
        # delete a FileSystem, the cache must be cleared.
        if conf.get("weakref_instance_cache"): # pragma: no cover
            # debug option for analysing fork/spawn conditions
            cls._cache = weakref.WeakValueDictionary()
        else:
            cls._cache = {}
        # Remember which process created the cache, so a forked child can
        # detect and discard instances inherited from the parent.
        cls._pid = os.getpid()
    def __call__(cls, *args, **kwargs):
        # Merge in defaults from the fsspec config for this class.
        kwargs = apply_config(cls, kwargs)
        extra_tokens = tuple(
            getattr(cls, attr, None) for attr in cls._extra_tokenize_attributes
        )
        # Cache key covers class, PID, thread, ctor args and extra attrs.
        token = tokenize(
            cls, cls._pid, threading.get_ident(), *args, *extra_tokens, **kwargs
        )
        skip = kwargs.pop("skip_instance_cache", False)
        if os.getpid() != cls._pid:
            # Running in a forked child: drop the parent's instances.
            cls._cache.clear()
            cls._pid = os.getpid()
        if not skip and cls.cachable and token in cls._cache:
            return cls._cache[token]
        else:
            obj = super().__call__(*args, **kwargs)
            # Setting _fs_token here causes some static linters to complain.
            obj._fs_token_ = token
            obj.storage_args = args
            obj.storage_options = kwargs
            if obj.async_impl:
                from .asyn import mirror_sync_methods
                mirror_sync_methods(obj)
            if cls.cachable and not skip:
                cls._cache[token] = obj
            return obj
# Choose the base class for AbstractFileSystem: with pyarrow < 2.0
# installed, subclass pyarrow's DaskFileSystem (for compatibility with
# its isinstance checks); otherwise use a plain ``object`` base.
# NOTE(review): distutils.LooseVersion is deprecated (PEP 632) — assumes
# this file still targets Pythons where distutils is available.
pa_version = get_package_version_without_import("pyarrow")
if pa_version and LooseVersion(pa_version) < LooseVersion("2.0"):
    try:
        import pyarrow as pa
        up = pa.filesystem.DaskFileSystem
    except ImportError: # pragma: no cover
        # pyarrow exists but doesn't import for some reason
        up = object
else: # pragma: no cover
    up = object
class AbstractFileSystem(up, metaclass=_Cached):
    """
    An abstract super-class for pythonic file-systems
    Implementations are expected to be compatible with or, better, subclass
    from here.
    """
    cachable = True  # this class can be cached, instances reused
    _cached = False  # set True once __init__ has configured an instance
    blocksize = 2 ** 22  # default block size, 4 MiB
    sep = "/"  # path separator
    protocol = "abstract"  # URL scheme(s); a str or tuple of str
    async_impl = False  # True on async subclasses (see _Cached.__call__)
    root_marker = ""  # For some FSs, may require leading '/' or other character
    #: Extra *class attributes* that should be considered when hashing.
    _extra_tokenize_attributes = ()
    def __init__(self, *args, **storage_options):
        """Create and configure file-system instance
        Instances may be cachable, so if similar enough arguments are seen
        a new instance is not required. The token attribute exists to allow
        implementations to cache instances if they wish.
        A reasonable default should be provided if there are no arguments.
        Subclasses should call this method.
        Parameters
        ----------
        use_listings_cache, listings_expiry_time, max_paths:
            passed to ``DirCache``, if the implementation supports
            directory listing caching. Pass use_listings_cache=False
            to disable such caching.
        skip_instance_cache: bool
            If this is a cachable implementation, pass True here to force
            creating a new instance even if a matching instance exists, and prevent
            storing this instance.
        asynchronous: bool
        loop: asyncio-compatible IOLoop or None
        """
        if self._cached:
            # reusing instance, don't change
            return
        # Mark configured so a repeated __init__ on a cached instance
        # (via _Cached.__call__) is a no-op.
        self._cached = True
        self._intrans = False
        self._transaction = None
        self._invalidated_caches_in_transaction = []
        self.dircache = DirCache(**storage_options)
        if storage_options.pop("add_docs", None):
            warnings.warn("add_docs is no longer supported.", FutureWarning)
        if storage_options.pop("add_aliases", None):
            warnings.warn("add_aliases has been removed.", FutureWarning)
        # This is set in _Cached
        self._fs_token_ = None
    @property
    def _fs_token(self):
        # Token assigned by the _Cached metaclass; identifies this
        # instance (class + PID + thread + constructor arguments).
        return self._fs_token_
    def __dask_tokenize__(self):
        # Dask uses this for deterministic hashing of the filesystem.
        return self._fs_token
    def __hash__(self):
        # The token is parsed as a base-16 string to produce the hash.
        return int(self._fs_token, 16)
    def __eq__(self, other):
        # Equal iff same implementation class and identical instance token.
        return isinstance(other, type(self)) and self._fs_token == other._fs_token
    def __reduce__(self):
        # Pickle support: re-invoke the constructor with the stored args.
        return make_instance, (type(self), self.storage_args, self.storage_options)
@classmethod
def _strip_protocol(cls, path):
"""Turn path from fully-qualified to file-system-specific
May require FS-specific handling, e.g., for relative paths or links.
"""
if isinstance(path, list):
return [cls._strip_protocol(p) for p in path]
path = stringify_path(path)
protos = (cls.protocol,) if isinstance(cls.protocol, str) else cls.protocol
for protocol in protos:
if path.startswith(protocol + "://"):
path = path[len(protocol) + 3 :]
elif path.startswith(protocol + "::"):
path = path[len(protocol) + 2 :]
path = path.rstrip("/")
# use of root_marker to make minimum required path, e.g., "/"
return path or cls.root_marker
    @staticmethod
    def _get_kwargs_from_urls(path):
        """If kwargs can be encoded in the paths, extract them here
        This should happen before instantiation of the class; incoming paths
        then should be amended to strip the options in methods.
        Examples may look like an sftp path "sftp://user@host:/my/path", where
        the user and host should become kwargs and later get stripped.
        Subclasses override this; the base implementation extracts nothing.
        """
        # by default, nothing happens
        return {}
@classmethod
def current(cls):
"""Return the most recently created FileSystem
If no instance has been created, then create one with defaults
"""
if not len(cls._cache):
return cls()
else:
return list(cls._cache.values())[-1]
    @property
    def transaction(self):
        """A context within which files are committed together upon exit
        Requires the file class to implement `.commit()` and `.discard()`
        for the normal and exception cases.
        """
        # Lazily create a single Transaction object per instance.
        if self._transaction is None:
            self._transaction = Transaction(self)
        return self._transaction
    def start_transaction(self):
        """Begin write transaction for deferring files, non-context version"""
        self._intrans = True
        self._transaction = Transaction(self)
        return self.transaction
    def end_transaction(self):
        """Finish write transaction, non-context version"""
        self.transaction.complete()
        self._transaction = None
        # The invalidated caches must be cleared after the transaction completes.
        for path in self._invalidated_caches_in_transaction:
            self.invalidate_cache(path)
        self._invalidated_caches_in_transaction.clear()
    def invalidate_cache(self, path=None):
        """
        Discard any cached directory information
        Parameters
        ----------
        path: string or None
            If None, clear all listings cached else listings at or under given
            path.
        """
        # Not necessary to implement invalidation mechanism, may have no cache.
        # But if have, you should call this method of parent class from your
        # subclass to ensure expiring caches after transactions correctly.
        # See the implementation of FTPFileSystem in ftp.py
        if self._intrans:
            # During a transaction, defer the invalidation until
            # end_transaction() replays the recorded paths.
            self._invalidated_caches_in_transaction.append(path)
    def mkdir(self, path, create_parents=True, **kwargs):
        """
        Create directory entry at path
        For systems that don't have true directories, may create an entry for
        this instance only and not touch the real filesystem
        Parameters
        ----------
        path: str
            location
        create_parents: bool
            if True, this is equivalent to ``makedirs``
        kwargs:
            may be permissions, etc.
        """
        pass  # not necessary to implement, may not have directories
    def makedirs(self, path, exist_ok=False):
        """Recursively make directories
        Creates directory at path and any intervening required directories.
        Raises exception if, for instance, the path already exists but is a
        file.
        Parameters
        ----------
        path: str
            leaf directory name
        exist_ok: bool (False)
            If True, will error if the target already exists
        """
        pass  # not necessary to implement, may not have directories
    def rmdir(self, path):
        """Remove a directory, if empty"""
        pass  # not necessary to implement, may not have directories
    def ls(self, path, detail=True, **kwargs):
        """List objects at path.
        This should include subdirectories and files at that location. The
        difference between a file and a directory must be clear when details
        are requested.
        The specific keys, or perhaps a FileInfo class, or similar, is TBD,
        but must be consistent across implementations.
        Must include:
        - full path to the entry (without protocol)
        - size of the entry, in bytes. If the value cannot be determined, will
          be ``None``.
        - type of entry, "file", "directory" or other
        Additional information
        may be present, appropriate to the file-system, e.g., generation,
        checksum, etc.
        May use refresh=True|False to allow use of self._ls_from_cache to
        check for a saved listing and avoid calling the backend. This would be
        common where listing may be expensive.
        Parameters
        ----------
        path: str
        detail: bool
            if True, gives a list of dictionaries, where each is the same as
            the result of ``info(path)``. If False, gives a list of paths
            (str).
        kwargs: may have additional backend-specific options, such as version
            information
        Returns
        -------
        List of strings if detail is False, or list of directory information
        dicts if detail is True.
        """
        # Abstract: every concrete filesystem must provide its own listing.
        raise NotImplementedError
    def _ls_from_cache(self, path):
        """Check cache for listing
        Returns listing, if found (may be an empty list for a directory that
        exists but contains nothing), None if not in cache.
        """
        parent = self._parent(path)
        # Fast path: the path itself was listed as a directory.
        if path.rstrip("/") in self.dircache:
            return self.dircache[path.rstrip("/")]
        try:
            # Otherwise look for the entry inside its parent's listing.
            files = [
                f
                for f in self.dircache[parent]
                if f["name"] == path
                or (f["name"] == path.rstrip("/") and f["type"] == "directory")
            ]
            if len(files) == 0:
                # parent dir was listed but did not contain this file
                raise FileNotFoundError(path)
            return files
        except KeyError:
            # Parent not cached: fall through and return None (cache miss).
            pass
def walk(self, path, maxdepth=None, **kwargs):
    """Return all files below path

    List all files, recursing into subdirectories; output is iterator-style,
    like ``os.walk()``. For a simple list of files, ``find()`` is available.

    Note that the "files" outputted will include anything that is not
    a directory, such as links.

    Parameters
    ----------
    path: str
        Root to recurse into
    maxdepth: int
        Maximum recursion depth. None means limitless, but not recommended
        on link-based file-systems.
    kwargs: passed to ``ls``
    """
    path = self._strip_protocol(path)
    full_dirs = {}   # full path -> info, used for recursion
    dirs = {}        # basename -> info, yielded to the caller
    files = {}       # basename -> info, yielded to the caller

    detail = kwargs.pop("detail", False)
    try:
        listing = self.ls(path, detail=True, **kwargs)
    except (FileNotFoundError, IOError):
        # NOTE(review): this is a generator, so ``return <tuple>`` ends
        # iteration without yielding anything (the tuple only becomes
        # StopIteration.value) — confirm callers expect an empty walk
        # for a missing/unreadable root
        if detail:
            return path, {}, {}
        return path, [], []

    for info in listing:
        # each info name must be at least [path]/part , but here
        # we check also for names like [path]/part/
        pathname = info["name"].rstrip("/")
        name = pathname.rsplit("/", 1)[-1]
        if info["type"] == "directory" and pathname != path:
            # do not include "self" path
            full_dirs[pathname] = info
            dirs[name] = info
        elif pathname == path:
            # file-like with same name as give path
            files[""] = info
        else:
            files[name] = info

    if detail:
        yield path, dirs, files
    else:
        yield path, list(dirs), list(files)

    if maxdepth is not None:
        maxdepth -= 1
        if maxdepth < 1:
            # depth budget exhausted: do not recurse further
            return

    for d in full_dirs:
        yield from self.walk(d, maxdepth=maxdepth, detail=detail, **kwargs)
def find(self, path, maxdepth=None, withdirs=False, **kwargs):
    """List all files below path.

    Like posix ``find`` command without conditions.

    Parameters
    ----------
    path : str
        Root to search below.
    maxdepth: int or None
        If not None, the maximum number of levels to descend
    withdirs: bool
        Whether to include directory paths in the output. This is True
        when used by glob, but users usually only want files.

    kwargs are passed to ``ls`` (and ``detail=True`` may be passed through
    to get a dict of {path: info} instead of a list of paths).
    """
    # TODO: allow equivalent of -name parameter
    path = self._strip_protocol(path)
    out = {}
    detail = kwargs.pop("detail", False)
    for _, dirs, files in self.walk(path, maxdepth, detail=True, **kwargs):
        if withdirs:
            files.update(dirs)
        # walk keys by basename, which is not needed here; re-index the
        # info dicts by their full path (the basename key was unused)
        out.update({info["name"]: info for info in files.values()})
    if not out and self.isfile(path):
        # walk works on directories, but find should also return [path]
        # when path happens to be a file
        out[path] = {}
    names = sorted(out)
    if not detail:
        return names
    else:
        return {name: out[name] for name in names}
def du(self, path, total=True, maxdepth=None, **kwargs):
    """Space used by files within a path

    Parameters
    ----------
    path: str
    total: bool
        whether to sum all the file sizes
    maxdepth: int or None
        maximum number of directory levels to descend, None for unlimited.
    kwargs: passed to ``ls``

    Returns
    -------
    Dict of {fn: size} if total=False, or int otherwise, where numbers
    refer to bytes used.
    """
    # collect per-entry sizes keyed by canonical name from info()
    sizes = {
        details["name"]: details["size"]
        for details in (
            self.info(name)
            for name in self.find(path, maxdepth=maxdepth, **kwargs)
        )
    }
    if not total:
        return sizes
    return sum(sizes.values())
def glob(self, path, **kwargs):
    """
    Find files by glob-matching.

    If the path ends with '/' and does not contain "*", it is essentially
    the same as ``ls(path)``, returning only files.

    We support ``"**"``,
    ``"?"`` and ``"[..]"``. We do not support ^ for pattern negation.

    Search path names that contain embedded characters special to this
    implementation of glob may not produce expected results;
    e.g., 'foo/bar/*starredfilename*'.

    kwargs are passed to ``ls``.
    """
    import re

    ends = path.endswith("/")
    path = self._strip_protocol(path)
    # position of the first magic character of each kind (or end of string)
    indstar = path.find("*") if path.find("*") >= 0 else len(path)
    indques = path.find("?") if path.find("?") >= 0 else len(path)
    indbrace = path.find("[") if path.find("[") >= 0 else len(path)

    ind = min(indstar, indques, indbrace)

    detail = kwargs.pop("detail", False)

    if not has_magic(path):
        # no pattern characters at all: behave like exists()/ls()
        root = path
        depth = 1
        if ends:
            path += "/*"
        elif self.exists(path):
            if not detail:
                return [path]
            else:
                return {path: self.info(path)}
        else:
            if not detail:
                return []  # glob of non-existent returns empty
            else:
                return {}
    elif "/" in path[:ind]:
        # literal directory prefix before the first magic char: list from it
        ind2 = path[:ind].rindex("/")
        root = path[: ind2 + 1]
        depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1
    else:
        root = ""
        depth = None if "**" in path else path[ind + 1 :].count("/") + 1

    allpaths = self.find(root, maxdepth=depth, withdirs=True, detail=True, **kwargs)
    # Escape characters special to python regex, leaving our supported
    # special characters in place.
    # See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
    # for shell globbing details.
    pattern = (
        "^"
        + (
            path.replace("\\", r"\\")
            .replace(".", r"\.")
            .replace("+", r"\+")
            .replace("//", "/")
            .replace("(", r"\(")
            .replace(")", r"\)")
            .replace("|", r"\|")
            .replace("^", r"\^")
            .replace("$", r"\$")
            .replace("{", r"\{")
            .replace("}", r"\}")
            .rstrip("/")
            .replace("?", ".")
        )
        + "$"
    )
    # protect "**" while translating single "*" (no-slash match), then
    # restore "**" as ".*" (matches across path separators)
    pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern)
    pattern = re.sub("[*]", "[^/]*", pattern)
    pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*"))
    out = {
        p: allpaths[p]
        for p in sorted(allpaths)
        if pattern.match(p.replace("//", "/").rstrip("/"))
    }
    if detail:
        return out
    else:
        return list(out)
def exists(self, path, **kwargs):
    """Whether there is an entry (file or directory) at the given path."""
    try:
        self.info(path, **kwargs)
    except:  # noqa: E722
        # any exception allowed bar FileNotFoundError?
        return False
    return True
def info(self, path, **kwargs):
    """Give details of entry at path

    Returns a single dictionary, with exactly the same information as ``ls``
    would with ``detail=True``.

    The default implementation calls ``ls`` and could be overridden by a
    shortcut. kwargs are passed on to ``ls()``.

    Some file systems might not be able to measure the file's size, in
    which case, the returned dict will include ``'size': None``.

    Returns
    -------
    dict with keys: name (full path in the FS), size (in bytes), type (file,
    directory, or something else) and other FS-specific keys.
    """
    path = self._strip_protocol(path)
    # first attempt: find the entry in its parent's listing
    out = self.ls(self._parent(path), detail=True, **kwargs)
    out = [o for o in out if o["name"].rstrip("/") == path]
    if out:
        return out[0]
    # second attempt: list the path itself (it may be a directory)
    out = self.ls(path, detail=True, **kwargs)
    path = path.rstrip("/")
    out1 = [o for o in out if o["name"].rstrip("/") == path]
    if len(out1) == 1:
        if "size" not in out1[0]:
            out1[0]["size"] = None
        return out1[0]
    elif len(out1) > 1 or out:
        # listing succeeded with other contents -> treat as a directory
        return {"name": path, "size": 0, "type": "directory"}
    else:
        raise FileNotFoundError(path)
def checksum(self, path):
    """Unique value for current version of file

    If the checksum is the same from one moment to another, the contents
    are guaranteed to be the same. If the checksum changes, the contents
    *might* have changed.

    This should normally be overridden; default will probably capture
    creation/modification timestamp (which would be good) or maybe
    access timestamp (which would be bad)
    """
    details = self.info(path)
    return int(tokenize(details), 16)
def size(self, path):
    """Size in bytes of file (None when the backend cannot determine it)."""
    details = self.info(path)
    return details.get("size", None)
def isdir(self, path):
    """Is this entry directory-like?"""
    try:
        details = self.info(path)
    except IOError:
        return False
    return details["type"] == "directory"
def isfile(self, path):
    """Is this entry file-like?"""
    try:
        details = self.info(path)
    except:  # noqa: E722
        return False
    return details["type"] == "file"
def cat_file(self, path, start=None, end=None, **kwargs):
    """Get the content of a file

    Parameters
    ----------
    path: URL of file on this filesystems
    start, end: int
        Bytes limits of the read. If negative, backwards from end,
        like usual python slices. Either can be None for start or
        end of file, respectively
    kwargs: passed to ``open()``.
    """
    # explicitly set buffering off?
    with self.open(path, "rb", **kwargs) as f:
        if start is not None:
            # negative start counts back from EOF, clamped at 0
            f.seek(start if start >= 0 else max(0, f.size + start))
        if end is None:
            return f.read()
        if end < 0:
            end = f.size + end
        return f.read(end - f.tell())
def pipe_file(self, path, value, **kwargs):
    """Set the bytes of given file"""
    with self.open(path, "wb") as stream:
        stream.write(value)
def pipe(self, path, value=None, **kwargs):
    """Put value into path

    (counterpart to ``cat``)

    Parameters
    ----------
    path: string or dict(str, bytes)
        If a string, a single remote location to put ``value`` bytes; if a dict,
        a mapping of {path: bytesvalue}.
    value: bytes, optional
        If using a single path, these are the bytes to put there. Ignored if
        ``path`` is a dict
    """
    if isinstance(path, dict):
        # batch form: each key is a target, each value its contents
        for target, data in path.items():
            self.pipe_file(self._strip_protocol(target), data, **kwargs)
    elif isinstance(path, str):
        self.pipe_file(self._strip_protocol(path), value, **kwargs)
    else:
        raise ValueError("path must be str or dict")
def cat(self, path, recursive=False, on_error="raise", **kwargs):
    """Fetch (potentially multiple) paths' contents

    Returns a dict of {path: contents} if there are multiple paths
    or the path has been otherwise expanded

    on_error : "raise", "omit", "return"
        If raise, an underlying exception will be raised (converted to KeyError
        if the type is in self.missing_exceptions); if omit, keys with exception
        will simply not be included in the output; if "return", all keys are
        included in the output, but the value will be bytes or an exception
        instance.
    """
    paths = self.expand_path(path, recursive=recursive)
    if (
        len(paths) > 1
        or isinstance(path, list)
        or paths[0] != self._strip_protocol(path)
    ):
        # expansion produced multiple/changed paths: return a mapping
        out = {}
        for path in paths:
            try:
                out[path] = self.cat_file(path, **kwargs)
            except Exception as e:
                if on_error == "raise":
                    raise
                if on_error == "return":
                    out[path] = e
                # on_error == "omit": silently drop this key
        return out
    else:
        # single literal path: return the raw bytes directly
        return self.cat_file(paths[0], **kwargs)
def get_file(self, rpath, lpath, callback=_DEFAULT_CALLBACK, **kwargs):
    """Copy single remote file to local"""
    if self.isdir(rpath):
        # directories carry no content; just mirror the tree locally
        os.makedirs(lpath, exist_ok=True)
        return None

    with self.open(rpath, "rb", **kwargs) as f1:
        callback.set_size(getattr(f1, "size", None))

        with open(lpath, "wb") as f2:
            data = True
            while data:
                # stream in blocksize chunks, reporting progress per chunk
                data = f1.read(self.blocksize)
                segment_len = f2.write(data)
                callback.relative_update(segment_len)
def get(self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs):
    """Copy file(s) to local.

    Copies a specific file or tree of files (if recursive=True). If lpath
    ends with a "/", it will be assumed to be a directory, and target files
    will go within. Can submit a list of paths, which may be glob-patterns
    and will be expanded.

    Calls get_file for each source.
    """
    from .implementations.local import make_path_posix

    if isinstance(lpath, str):
        lpath = make_path_posix(lpath)
    # expand remote sources, then derive one local target per source
    rpaths = self.expand_path(rpath, recursive=recursive)
    lpaths = other_paths(rpaths, lpath)

    callback.set_size(len(lpaths))
    for lpath, rpath in callback.wrap(zip(lpaths, rpaths)):
        # branch() lets the callback inject a per-file child callback
        # into kwargs before each transfer
        callback.branch(rpath, lpath, kwargs)
        self.get_file(rpath, lpath, **kwargs)
def put_file(self, lpath, rpath, callback=_DEFAULT_CALLBACK, **kwargs):
    """Copy single file to remote"""
    if os.path.isdir(lpath):
        # local directory: create the remote counterpart, nothing to upload
        self.makedirs(rpath, exist_ok=True)
        return None

    with open(lpath, "rb") as f1:
        # seek-to-end reveals the file size for progress reporting
        callback.set_size(f1.seek(0, 2))
        f1.seek(0)

        self.mkdirs(os.path.dirname(rpath), exist_ok=True)
        with self.open(rpath, "wb", **kwargs) as f2:
            data = True
            while data:
                # stream in blocksize chunks, reporting progress per chunk
                data = f1.read(self.blocksize)
                segment_len = f2.write(data)
                callback.relative_update(segment_len)
def put(self, lpath, rpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs):
    """Copy file(s) from local.

    Copies a specific file or tree of files (if recursive=True). If rpath
    ends with a "/", it will be assumed to be a directory, and target files
    will go within.

    Calls put_file for each source.
    """
    from .implementations.local import LocalFileSystem, make_path_posix

    rpath = (
        self._strip_protocol(rpath)
        if isinstance(rpath, str)
        else [self._strip_protocol(p) for p in rpath]
    )
    if isinstance(lpath, str):
        lpath = make_path_posix(lpath)
    # expand local sources with a local FS, then derive the matching
    # remote targets
    fs = LocalFileSystem()
    lpaths = fs.expand_path(lpath, recursive=recursive)
    rpaths = other_paths(
        lpaths, rpath, exists=isinstance(rpath, str) and self.isdir(rpath)
    )

    callback.set_size(len(rpaths))
    for lpath, rpath in callback.wrap(zip(lpaths, rpaths)):
        # branch() lets the callback inject a per-file child callback
        callback.branch(lpath, rpath, kwargs)
        self.put_file(lpath, rpath, **kwargs)
def head(self, path, size=1024):
    """Get the first ``size`` bytes from file"""
    with self.open(path, "rb") as stream:
        return stream.read(size)
def tail(self, path, size=1024):
    """Get the last ``size`` bytes from file"""
    with self.open(path, "rb") as stream:
        # seek back from EOF, but never before the start of the file
        stream.seek(max(-size, -stream.size), 2)
        return stream.read()
def cp_file(self, path1, path2, **kwargs):
    """Copy a single file within this filesystem (abstract hook for
    ``copy``/``cp``; must be provided by concrete implementations)."""
    raise NotImplementedError
def copy(self, path1, path2, recursive=False, on_error=None, maxdepth=None, **kwargs):
    """Copy within two locations in the filesystem

    Parameters
    ----------
    path1: str or list of str
        Source path(s); may be glob patterns or directories.
    path2: str
        Destination path (or root when copying multiple sources).
    recursive: bool
        Whether to descend into source directories.
    on_error : "raise", "ignore"
        If raise, any not-found exceptions will be raised; if ignore any
        not-found exceptions will cause the path to be skipped; defaults to
        raise unless recursive is true, where the default is ignore
    maxdepth: int or None
        Depth limit used when expanding the sources. Previously this
        keyword (as forwarded by ``mv``) fell through ``**kwargs`` into
        ``cp_file``, which implementations do not accept, instead of
        limiting the expansion depth.
    """
    if on_error is None and recursive:
        on_error = "ignore"
    elif on_error is None:
        on_error = "raise"

    paths = self.expand_path(path1, recursive=recursive, maxdepth=maxdepth)
    path2 = other_paths(paths, path2)
    for p1, p2 in zip(paths, path2):
        try:
            self.cp_file(p1, p2, **kwargs)
        except FileNotFoundError:
            if on_error == "raise":
                raise
def expand_path(self, path, recursive=False, maxdepth=None):
    """Turn one or more globs or directories into a list of all matching paths
    to files or directories."""
    if isinstance(path, str):
        # normalize the scalar form to a one-element list and recurse once
        out = self.expand_path([path], recursive, maxdepth)
    else:
        # reduce depth on each recursion level unless None or 0
        maxdepth = maxdepth if not maxdepth else maxdepth - 1
        out = set()
        path = [self._strip_protocol(p) for p in path]
        for p in path:
            if has_magic(p):
                # glob pattern: expand it, and optionally recurse into
                # every match as well
                bit = set(self.glob(p))
                out |= bit
                if recursive:
                    out |= set(
                        self.expand_path(
                            list(bit), recursive=recursive, maxdepth=maxdepth
                        )
                    )
                continue
            elif recursive:
                rec = set(self.find(p, maxdepth=maxdepth, withdirs=True))
                out |= rec
            if p not in out and (recursive is False or self.exists(p)):
                # should only check once, for the root
                out.add(p)
    if not out:
        raise FileNotFoundError(path)
    return list(sorted(out))
def mv(self, path1, path2, recursive=False, maxdepth=None, **kwargs):
    """Move file(s) from one location to another

    Implemented as copy-then-delete, so a failed copy leaves the
    source intact.
    """
    self.copy(path1, path2, recursive=recursive, maxdepth=maxdepth)
    self.rm(path1, recursive=recursive)
def rm_file(self, path):
    """Delete a file"""
    # delegate to the legacy single-file delete hook for compatibility
    self._rm(path)
def _rm(self, path):
    """Delete one file (legacy hook; implementations should override
    ``rm_file`` instead)."""
    # this is the old name for the method, prefer rm_file
    raise NotImplementedError
def rm(self, path, recursive=False, maxdepth=None):
    """Delete files.

    Parameters
    ----------
    path: str or list of str
        File(s) to delete.
    recursive: bool
        If file(s) are directories, recursively delete contents and then
        also remove the directory
    maxdepth: int or None
        Depth to pass to walk for finding files to delete, if recursive.
        If None, there will be no limit and infinite recursion may be
        possible.
    """
    targets = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
    # delete deepest entries first, so directories are empty when reached
    for target in reversed(targets):
        self.rm_file(target)
@classmethod
def _parent(cls, path):
    """Return the parent directory of ``path``, prefixed with the
    filesystem's root marker."""
    path = cls._strip_protocol(path.rstrip("/"))
    head, sep, _ = path.rpartition("/")
    if not sep:
        # no separator at all: the parent is the filesystem root
        return cls.root_marker
    return cls.root_marker + head.lstrip(cls.root_marker)
def _open(
    self,
    path,
    mode="rb",
    block_size=None,
    autocommit=True,
    cache_options=None,
    **kwargs,
):
    """Return raw bytes-mode file-like from the file-system

    Default implementation builds an ``AbstractBufferedFile``; backends
    typically override this with their own buffered-file subclass.
    """
    return AbstractBufferedFile(
        self,
        path,
        mode,
        block_size,
        autocommit,
        cache_options=cache_options,
        **kwargs,
    )
def open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs):
    """
    Return a file-like object from the filesystem

    The resultant instance must function correctly in a context ``with``
    block.

    Parameters
    ----------
    path: str
        Target file
    mode: str like 'rb', 'w'
        See builtin ``open()``
    block_size: int
        Some indication of buffering - this is a value in bytes
    cache_options : dict, optional
        Extra arguments to pass through to the cache.
    encoding, errors, newline: passed on to TextIOWrapper for text mode
    """
    import io

    path = self._strip_protocol(path)
    if "b" not in mode:
        # text mode: open the equivalent binary stream and wrap it
        mode = mode.replace("t", "") + "b"

        text_kwargs = {
            k: kwargs.pop(k)
            for k in ["encoding", "errors", "newline"]
            if k in kwargs
        }
        return io.TextIOWrapper(
            self.open(path, mode, block_size, **kwargs), **text_kwargs
        )
    else:
        # inside a transaction, writes are deferred until it completes
        ac = kwargs.pop("autocommit", not self._intrans)
        f = self._open(
            path,
            mode=mode,
            block_size=block_size,
            autocommit=ac,
            cache_options=cache_options,
            **kwargs,
        )
        if not ac and "r" not in mode:
            # register uncommitted writable files with the transaction
            self.transaction.files.append(f)
        return f
def touch(self, path, truncate=True, **kwargs):
    """Create empty file, or update timestamp

    Parameters
    ----------
    path: str
        file location
    truncate: bool
        If True, always set file size to 0; if False, update timestamp and
        leave file unchanged, if backend allows this
    """
    if not truncate and self.exists(path):
        # only a timestamp update would be needed, which the base
        # class cannot do generically
        raise NotImplementedError  # update timestamp, if possible
    with self.open(path, "wb", **kwargs):
        pass
def ukey(self, path):
    """Hash of file properties, to tell if it has changed"""
    serialized = str(self.info(path)).encode()
    return sha256(serialized).hexdigest()
def read_block(self, fn, offset, length, delimiter=None):
    """Read a block of bytes from a file

    Starting at ``offset`` of the file, read ``length`` bytes. If
    ``delimiter`` is set then we ensure that the read starts and stops at
    delimiter boundaries that follow the locations ``offset`` and ``offset
    + length``. If ``offset`` is zero then we start at zero. The
    bytestring returned WILL include the end delimiter string.

    If offset+length is beyond the eof, reads to eof.

    Parameters
    ----------
    fn: string
        Path to filename
    offset: int
        Byte offset to start read
    length: int
        Number of bytes to read
    delimiter: bytes (optional)
        Ensure reading starts and stops at delimiter bytestring

    Examples
    --------
    >>> fs.read_block('data/file.csv', 0, 13)  # doctest: +SKIP
    b'Alice, 100\\nBo'
    >>> fs.read_block('data/file.csv', 0, 13, delimiter=b'\\n')  # doctest: +SKIP
    b'Alice, 100\\nBob, 200\\n'

    Use ``length=None`` to read to the end of the file.
    >>> fs.read_block('data/file.csv', 0, None, delimiter=b'\\n')  # doctest: +SKIP
    b'Alice, 100\\nBob, 200\\nCharlie, 300'

    See Also
    --------
    utils.read_block
    """
    with self.open(fn, "rb") as f:
        size = f.size
        if length is None:
            length = size
        if size is not None and offset + length > size:
            # clamp so the delimiter-aware helper never reads past EOF
            length = size - offset
        return read_block(f, offset, length, delimiter)
def to_json(self):
    """
    JSON representation of this filesystem instance

    Returns
    -------
    str: JSON structure with keys cls (the python location of this class),
    protocol (text name of this class's protocol, first one in case of
    multiple), args (positional args, usually empty), and all other
    kwargs as their own keys.
    """
    import json

    cls = type(self)
    cls = ".".join((cls.__module__, cls.__name__))
    # report a single canonical protocol name even if several are registered
    proto = (
        self.protocol[0]
        if isinstance(self.protocol, (tuple, list))
        else self.protocol
    )
    return json.dumps(
        dict(
            **{"cls": cls, "protocol": proto, "args": self.storage_args},
            **self.storage_options,
        )
    )
@staticmethod
def from_json(blob):
    """
    Recreate a filesystem instance from JSON representation

    See ``.to_json()`` for the expected structure of the input

    Parameters
    ----------
    blob: str

    Returns
    -------
    file system instance, not necessarily of this particular class.
    """
    import json

    from .registry import _import_class, get_filesystem_class

    dic = json.loads(blob)
    protocol = dic.pop("protocol")
    try:
        # prefer the exact class recorded in the blob
        cls = _import_class(dic.pop("cls"))
    except (ImportError, ValueError, RuntimeError, KeyError):
        # fall back to whatever class is registered for the protocol
        cls = get_filesystem_class(protocol)
    return cls(*dic.pop("args", ()), **dic)
def _get_pyarrow_filesystem(self):
    """
    Make a version of the FS instance which will be acceptable to pyarrow
    """
    # all instances already also derive from pyarrow
    return self
def get_mapper(self, root, check=False, create=False):
    """Create key/value store based on this file-system

    Makes a MutableMapping interface to the FS at the given root path.
    See ``fsspec.mapping.FSMap`` for further details.
    """
    from .mapping import FSMap

    mapper = FSMap(root, self, check, create)
    return mapper
@classmethod
def clear_instance_cache(cls):
    """
    Clear the cache of filesystem instances.

    Notes
    -----
    Unless overridden by setting the ``cachable`` class attribute to False,
    the filesystem class stores a reference to newly created instances. This
    prevents Python's normal rules around garbage collection from working,
    since the instances refcount will not drop to zero until
    ``clear_instance_cache`` is called.
    """
    cls._cache.clear()  # forget all memoized instances
def created(self, path):
    """Return the created timestamp of a file as a datetime.datetime

    Raises NotImplementedError unless a concrete backend provides it.
    """
    raise NotImplementedError
def modified(self, path):
    """Return the modified timestamp of a file as a datetime.datetime

    Raises NotImplementedError unless a concrete backend provides it.
    """
    raise NotImplementedError
# ------------------------------------------------------------------------
# Aliases: each method below simply forwards to the canonical
# implementation above, providing familiar os/posix-style names.

def makedir(self, path, create_parents=True, **kwargs):
    """Alias of :ref:`FilesystemSpec.mkdir`."""
    return self.mkdir(path, create_parents=create_parents, **kwargs)

def mkdirs(self, path, exist_ok=False):
    """Alias of :ref:`FilesystemSpec.makedirs`."""
    return self.makedirs(path, exist_ok=exist_ok)

def listdir(self, path, detail=True, **kwargs):
    """Alias of :ref:`FilesystemSpec.ls`."""
    return self.ls(path, detail=detail, **kwargs)

def cp(self, path1, path2, **kwargs):
    """Alias of :ref:`FilesystemSpec.copy`."""
    return self.copy(path1, path2, **kwargs)

def move(self, path1, path2, **kwargs):
    """Alias of :ref:`FilesystemSpec.mv`."""
    return self.mv(path1, path2, **kwargs)

def stat(self, path, **kwargs):
    """Alias of :ref:`FilesystemSpec.info`."""
    return self.info(path, **kwargs)

def disk_usage(self, path, total=True, maxdepth=None, **kwargs):
    """Alias of :ref:`FilesystemSpec.du`."""
    return self.du(path, total=total, maxdepth=maxdepth, **kwargs)

def rename(self, path1, path2, **kwargs):
    """Alias of :ref:`FilesystemSpec.mv`."""
    return self.mv(path1, path2, **kwargs)

def delete(self, path, recursive=False, maxdepth=None):
    """Alias of :ref:`FilesystemSpec.rm`."""
    return self.rm(path, recursive=recursive, maxdepth=maxdepth)

def upload(self, lpath, rpath, recursive=False, **kwargs):
    """Alias of :ref:`FilesystemSpec.put`."""
    return self.put(lpath, rpath, recursive=recursive, **kwargs)

def download(self, rpath, lpath, recursive=False, **kwargs):
    """Alias of :ref:`FilesystemSpec.get`."""
    return self.get(rpath, lpath, recursive=recursive, **kwargs)
def sign(self, path, expiration=100, **kwargs):
    """Create a signed URL representing the given path

    Some implementations allow temporary URLs to be generated, as a
    way of delegating credentials.

    Parameters
    ----------
    path : str
        The path on the filesystem
    expiration : int
        Number of seconds to enable the URL for (if supported)

    Returns
    -------
    URL : str
        The signed URL

    Raises
    ------
    NotImplementedError : if method is not implemented for a filesystem
    """
    raise NotImplementedError("Sign is not implemented for this filesystem")
def _isfilestore(self):
    """Whether this filesystem maps to a local file store (always False
    for this base class)."""
    # Originally inherited from pyarrow DaskFileSystem. Keeping this
    # here for backwards compatibility as long as pyarrow uses its
    # legacy fsspec-compatible filesystems and thus accepts fsspec
    # filesystems as well
    return False
class AbstractBufferedFile(io.IOBase):
    """Convenient class to derive from to provide buffering

    In the case that the backend does not provide a pythonic file-like object
    already, this class contains much of the logic to build one. The only
    methods that need to be overridden are ``_upload_chunk``,
    ``_initiate_upload`` and ``_fetch_range``.
    """

    DEFAULT_BLOCK_SIZE = 5 * 2 ** 20  # 5 MiB

    def __init__(
        self,
        fs,
        path,
        mode="rb",
        block_size="default",
        autocommit=True,
        cache_type="readahead",
        cache_options=None,
        **kwargs,
    ):
        """
        Template for files with buffered reading and writing

        Parameters
        ----------
        fs: instance of FileSystem
        path: str
            location in file-system
        mode: str
            Normal file modes. Currently only 'wb', 'ab' or 'rb'. Some file
            systems may be read-only, and some may not support append.
        block_size: int
            Buffer size for reading or writing, 'default' for class default
        autocommit: bool
            Whether to write to final destination; may only impact what
            happens when file is being closed.
        cache_type: {"readahead", "none", "mmap", "bytes"}, default "readahead"
            Caching policy in read mode. See the definitions in ``core``.
        cache_options : dict
            Additional options passed to the constructor for the cache specified
            by `cache_type`.
        kwargs:
            Gets stored as self.kwargs
        """
        from .core import caches

        self.path = path
        self.fs = fs
        self.mode = mode
        self.blocksize = (
            self.DEFAULT_BLOCK_SIZE if block_size in ["default", None] else block_size
        )
        self.loc = 0  # current logical position within the file
        self.autocommit = autocommit
        self.end = None
        self.start = None
        self.closed = False

        if cache_options is None:
            cache_options = {}

        if "trim" in kwargs:
            warnings.warn(
                "Passing 'trim' to control the cache behavior has been deprecated. "
                "Specify it within the 'cache_options' argument instead.",
                FutureWarning,
            )
            cache_options["trim"] = kwargs.pop("trim")

        self.kwargs = kwargs

        if mode not in {"ab", "rb", "wb"}:
            raise NotImplementedError("File mode not supported")
        if mode == "rb":
            # read mode needs the file size up front to size the cache
            if not hasattr(self, "details"):
                self.details = fs.info(path)
            self.size = self.details["size"]
            self.cache = caches[cache_type](
                self.blocksize, self._fetch_range, self.size, **cache_options
            )
        else:
            # write/append mode buffers locally until flushed
            self.buffer = io.BytesIO()
            self.offset = None   # set to 0 once the upload is initiated
            self.forced = False  # True after flush(force=True)
            self.location = None

    @property
    def closed(self):
        # get around this attr being read-only in IOBase
        # use getattr here, since this can be called during del
        return getattr(self, "_closed", True)

    @closed.setter
    def closed(self, c):
        self._closed = c

    def __hash__(self):
        if "w" in self.mode:
            # writable files are only equal to themselves
            return id(self)
        else:
            return int(tokenize(self.details), 16)

    def __eq__(self, other):
        """Files are equal if they have the same checksum, only in read mode"""
        # NOTE(review): assumes ``other`` has a ``mode`` attribute;
        # comparing with an arbitrary object raises AttributeError rather
        # than returning NotImplemented — confirm intended
        return self.mode == "rb" and other.mode == "rb" and hash(self) == hash(other)

    def commit(self):
        """Move from temp to final destination"""

    def discard(self):
        """Throw away temporary file"""

    def info(self):
        """File information about this path"""
        if "r" in self.mode:
            return self.details
        else:
            raise ValueError("Info not available while writing")

    def tell(self):
        """Current file location"""
        return self.loc

    def seek(self, loc, whence=0):
        """Set current file location

        Parameters
        ----------
        loc: int
            byte location
        whence: {0, 1, 2}
            from start of file, current location or end of file, resp.
        """
        loc = int(loc)
        if not self.mode == "rb":
            raise OSError(ESPIPE, "Seek only available in read mode")
        if whence == 0:
            nloc = loc
        elif whence == 1:
            nloc = self.loc + loc
        elif whence == 2:
            nloc = self.size + loc
        else:
            raise ValueError("invalid whence (%s, should be 0, 1 or 2)" % whence)
        if nloc < 0:
            raise ValueError("Seek before start of file")
        self.loc = nloc
        return self.loc

    def write(self, data):
        """
        Write data to buffer.

        Buffer only sent on flush() or if buffer is greater than
        or equal to blocksize.

        Parameters
        ----------
        data: bytes
            Set of bytes to be written.
        """
        if self.mode not in {"wb", "ab"}:
            raise ValueError("File not in write mode")
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if self.forced:
            raise ValueError("This file has been force-flushed, can only close")
        out = self.buffer.write(data)
        self.loc += out
        if self.buffer.tell() >= self.blocksize:
            # buffer full: upload one block
            self.flush()
        return out

    def flush(self, force=False):
        """
        Write buffered data to backend store.

        Writes the current buffer, if it is larger than the block-size, or if
        the file is being closed.

        Parameters
        ----------
        force: bool
            When closing, write the last block even if it is smaller than
            blocks are allowed to be. Disallows further writing to this file.
        """
        if self.closed:
            raise ValueError("Flush on closed file")
        if force and self.forced:
            raise ValueError("Force flush cannot be called more than once")
        if force:
            self.forced = True

        if self.mode not in {"wb", "ab"}:
            # no-op to flush on read-mode
            return

        if not force and self.buffer.tell() < self.blocksize:
            # Defer write on small block
            return

        if self.offset is None:
            # Initialize a multipart upload
            self.offset = 0
            try:
                self._initiate_upload()
            except:  # noqa: E722
                self.closed = True
                raise

        if self._upload_chunk(final=force) is not False:
            # a False return means "keep buffering"; otherwise advance the
            # upload offset and start a fresh buffer
            self.offset += self.buffer.seek(0, 2)
            self.buffer = io.BytesIO()

    def _upload_chunk(self, final=False):
        """Write one part of a multi-block file upload

        Parameters
        ==========
        final: bool
            This is the last block, so should complete file, if
            self.autocommit is True.
        """
        # may not yet have been initialized, may need to call _initialize_upload

    def _initiate_upload(self):
        """Create remote file/upload"""
        pass

    def _fetch_range(self, start, end):
        """Get the specified set of bytes from remote"""
        raise NotImplementedError

    def read(self, length=-1):
        """
        Return data from cache, or fetch pieces as necessary

        Parameters
        ----------
        length: int (-1)
            Number of bytes to read; if <0, all remaining bytes.
        """
        length = -1 if length is None else int(length)
        if self.mode != "rb":
            raise ValueError("File not in read mode")
        if length < 0:
            length = self.size - self.loc
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        logger.debug("%s read: %i - %i" % (self, self.loc, self.loc + length))
        if length == 0:
            # don't even bother calling fetch
            return b""
        out = self.cache._fetch(self.loc, self.loc + length)
        self.loc += len(out)
        return out

    def readinto(self, b):
        """mirrors builtin file's readinto method

        https://docs.python.org/3/library/io.html#io.RawIOBase.readinto
        """
        out = memoryview(b).cast("B")
        data = self.read(out.nbytes)
        out[: len(data)] = data
        return len(data)

    def readuntil(self, char=b"\n", blocks=None):
        """Return data between current position and first occurrence of char

        char is included in the output, except if the end of the file is
        encountered first.

        Parameters
        ----------
        char: bytes
            Thing to find
        blocks: None or int
            How much to read in each go. Defaults to file blocksize - which may
            mean a new read on every call.
        """
        out = []
        while True:
            start = self.tell()
            part = self.read(blocks or self.blocksize)
            if len(part) == 0:
                break
            found = part.find(char)
            if found > -1:
                out.append(part[: found + len(char)])
                # rewind so the next read starts right after the delimiter
                self.seek(start + found + len(char))
                break
            out.append(part)
        return b"".join(out)

    def readline(self):
        """Read until first occurrence of newline character

        Note that, because of character encoding, this is not necessarily a
        true line ending.
        """
        return self.readuntil(b"\n")

    def __next__(self):
        out = self.readline()
        if out:
            return out
        raise StopIteration

    def __iter__(self):
        return self

    def readlines(self):
        """Return all data, split by the newline character"""
        data = self.read()
        lines = data.split(b"\n")
        out = [l + b"\n" for l in lines[:-1]]
        if data.endswith(b"\n"):
            # no trailing fragment after the final newline
            return out
        else:
            return out + [lines[-1]]
        # return list(self) ???

    def readinto1(self, b):
        return self.readinto(b)

    def close(self):
        """Close file

        Finalizes writes, discards cache
        """
        if getattr(self, "_unclosable", False):
            return
        if self.closed:
            return
        if self.mode == "rb":
            self.cache = None
        else:
            if not self.forced:
                # push out any remaining buffered data
                self.flush(force=True)

            if self.fs is not None:
                # the new file invalidates any cached listings
                self.fs.invalidate_cache(self.path)
                self.fs.invalidate_cache(self.fs._parent(self.path))

        self.closed = True

    def readable(self):
        """Whether opened for reading"""
        return self.mode == "rb" and not self.closed

    def seekable(self):
        """Whether is seekable (only in read mode)"""
        return self.readable()

    def writable(self):
        """Whether opened for writing"""
        return self.mode in {"wb", "ab"} and not self.closed

    def __del__(self):
        if not self.closed:
            self.close()

    def __str__(self):
        return "<File-like object %s, %s>" % (type(self.fs).__name__, self.path)

    __repr__ = __str__

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
| 33.694704
| 88
| 0.561686
|
529aef37e88a072e5fce72fd562c6cbf8531551b
| 732
|
py
|
Python
|
sql_app/models.py
|
IEPEREZ/FastAPItutorial
|
cac10f4ddb60bb95aa5aa9a2871f019720f53d3d
|
[
"MIT"
] | null | null | null |
sql_app/models.py
|
IEPEREZ/FastAPItutorial
|
cac10f4ddb60bb95aa5aa9a2871f019720f53d3d
|
[
"MIT"
] | null | null | null |
sql_app/models.py
|
IEPEREZ/FastAPItutorial
|
cac10f4ddb60bb95aa5aa9a2871f019720f53d3d
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from database import Base
class User(Base):
    """ORM model for the ``users`` table."""

    __tablename__ = "users"

    id = Column(Integer, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)  # login identifier, unique
    hashed_password = Column(String)  # never store plaintext passwords
    is_active = Column(Boolean, default=True)

    # one-to-many: items owned by this user (mirrored by Item.owner)
    items = relationship("Item", back_populates="owner")
class Item(Base):
__tablename__ = "items"
id = Column(Integer, primary_key=True, index=True)
title = Column(String, index=True)
description = Column(String, index=True)
owner_id = Column(Integer, ForeignKey("users.id"))
owner = relationship("User", back_populates="items")
| 28.153846
| 67
| 0.717213
|
d9b777de06489891ff67e5f8c180b5f8eea1d101
| 2,574
|
py
|
Python
|
lc/models/torch/lenet300.py
|
akamaster/anon_submission
|
105189fc1720a6b0c27df7e686519a6ffa75632b
|
[
"BSD-3-Clause"
] | 40
|
2020-05-19T01:31:02.000Z
|
2022-01-28T00:25:33.000Z
|
lc/models/torch/lenet300.py
|
akamaster/anon_submission
|
105189fc1720a6b0c27df7e686519a6ffa75632b
|
[
"BSD-3-Clause"
] | 3
|
2020-09-09T16:04:17.000Z
|
2022-03-18T00:59:39.000Z
|
lc/models/torch/lenet300.py
|
akamaster/anon_submission
|
105189fc1720a6b0c27df7e686519a6ffa75632b
|
[
"BSD-3-Clause"
] | 8
|
2020-09-08T14:11:16.000Z
|
2022-01-28T00:25:41.000Z
|
import torch.nn as nn
import torch.nn.init as init
from collections import OrderedDict
from .utils import LambdaLayer
__all__ = ['lenet300_classic', 'lenet300_classic_drop', 'lenet300_modern', 'lenet300_modern_drop']
def _weights_init(m):
classname = m.__class__.__name__
print(classname)
if isinstance(m, nn.Linear):
init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.0)
class LeNet300(nn.Module):
"""
Base LeNet300 module that allows to reconfigure it. LeNet300 is a network having 3 layers with weights of
dimensions 784x300, 300x100 and 100x10 and trained on MNIST dataset.
References:
Yann LeCun, Leon Bottou, Yoshua Bengio, and Patrick Haffner
Gradient-based learning applied to document recognition
https://ieeexplore.ieee.org/document/726791
"""
def __init__(self, dropout, nonlinearity):
"""
Constructor of LeNet300. With given options it is possible to set various nonlinearities and
add dropout layers after them
:param dropout: boolean, if True, the dropout layer will be added after every inter hidden layer,
no dropout after last layer
:param nonlinearity: function, a constructor that returns nonlinearity function
"""
super(LeNet300, self).__init__()
layers = [(784, 300), (300, 100), (100, 10)]
cfg = [('reshape', LambdaLayer(lambda x: x.view(x.size(0),-1)))]
for i, l in enumerate(layers):
cfg.append(('compressible_' + str(i), nn.Linear(*l)))
if i != len(layers)-1:
# only non terminal layers have nonlinearity and (possible) dropouts
cfg.append(('nonlinearity_' + str(i), nonlinearity()))
if dropout:
cfg.append(('drop_'+str(i), nn.Dropout()))
self.output = nn.Sequential(OrderedDict(cfg))
self.loss = nn.CrossEntropyLoss()
self.apply(_weights_init)
def forward(self, input):
return self.output(input)
def lenet300_classic():
"""
Creates classical version of LeNet300, the one having tanh activation functions and
no dropouts
"""
return LeNet300(dropout=False, nonlinearity=nn.Tanh)
def lenet300_classic_drop():
"""
Returns classical LeNet300 with intermediate dropouts between layers.
"""
return LeNet300(dropout=True, nonlinearity=nn.Tanh)
def lenet300_modern():
return LeNet300(dropout=False, nonlinearity=nn.ReLU)
def lenet300_modern_drop():
return LeNet300(dropout=True, nonlinearity=nn.ReLU)
| 34.32
| 109
| 0.669386
|
1cd9069665f600d44f09aed8a540010012676c2e
| 858
|
py
|
Python
|
wbb/modules/telegraph.py
|
divyanshmusic/WilliamButcherBot
|
1d905de0ee7906c530df1b91b1fada7909d5f1e8
|
[
"MIT"
] | 1
|
2021-12-06T13:59:52.000Z
|
2021-12-06T13:59:52.000Z
|
wbb/modules/telegraph.py
|
divyanshmusic/WilliamButcherBot
|
1d905de0ee7906c530df1b91b1fada7909d5f1e8
|
[
"MIT"
] | null | null | null |
wbb/modules/telegraph.py
|
divyanshmusic/WilliamButcherBot
|
1d905de0ee7906c530df1b91b1fada7909d5f1e8
|
[
"MIT"
] | 2
|
2022-03-18T17:40:10.000Z
|
2022-03-24T07:09:44.000Z
|
from pyrogram import filters
from pyrogram.types import Message
from wbb import app, telegraph
from wbb.core.decorators.errors import capture_err
__MODULE__ = "Telegraph"
__HELP__ = "/telegraph [Page name]: Paste styled text on telegraph."
@app.on_message(filters.command("telegraph"))
@capture_err
async def paste(_, message: Message):
reply = message.reply_to_message
if not reply or not reply.text:
return await message.reply("Reply to a text message")
if len(message.command) < 2:
return await message.reply("**Usage:**\n /telegraph [Page name]")
page_name = message.text.split(None, 1)[1]
page = telegraph.create_page(
page_name, html_content=reply.text.html.replace("\n", "<br>")
)
return await message.reply(
f"**Posted:** {page['url']}",
disable_web_page_preview=True,
)
| 28.6
| 73
| 0.691142
|
518038d29c2ce0faf7f863301b91310792219e41
| 41,201
|
py
|
Python
|
venv/lib/python3.6/site-packages/sqlalchemy/engine/interfaces.py
|
tchengatcincoai/cryptocoin-prices-compare
|
f295fecc7213a877bf717af0eb98414e9137b554
|
[
"MIT"
] | 78
|
2017-08-19T03:46:13.000Z
|
2020-02-19T04:29:45.000Z
|
desktop/core/ext-py/SQLAlchemy-1.2.0b3/lib/sqlalchemy/engine/interfaces.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 5
|
2017-08-21T16:33:08.000Z
|
2018-06-21T18:37:18.000Z
|
desktop/core/ext-py/SQLAlchemy-1.2.0b3/lib/sqlalchemy/engine/interfaces.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 43
|
2018-02-05T23:23:46.000Z
|
2021-07-28T22:51:42.000Z
|
# engine/interfaces.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define core interfaces used by the engine system."""
from .. import util
# backwards compat
from ..sql.compiler import Compiled, TypeCompiler
class Dialect(object):
"""Define the behavior of a specific database and DB-API combination.
Any aspect of metadata definition, SQL query generation,
execution, result-set handling, or anything else which varies
between databases is defined under the general category of the
Dialect. The Dialect acts as a factory for other
database-specific object implementations including
ExecutionContext, Compiled, DefaultGenerator, and TypeEngine.
All Dialects implement the following attributes:
name
identifying name for the dialect from a DBAPI-neutral point of view
(i.e. 'sqlite')
driver
identifying name for the dialect's DBAPI
positional
True if the paramstyle for this Dialect is positional.
paramstyle
the paramstyle to be used (some DB-APIs support multiple
paramstyles).
convert_unicode
True if Unicode conversion should be applied to all ``str``
types.
encoding
type of encoding to use for unicode, usually defaults to
'utf-8'.
statement_compiler
a :class:`.Compiled` class used to compile SQL statements
ddl_compiler
a :class:`.Compiled` class used to compile DDL statements
server_version_info
a tuple containing a version number for the DB backend in use.
This value is only available for supporting dialects, and is
typically populated during the initial connection to the database.
default_schema_name
the name of the default schema. This value is only available for
supporting dialects, and is typically populated during the
initial connection to the database.
execution_ctx_cls
a :class:`.ExecutionContext` class used to handle statement execution
execute_sequence_format
either the 'tuple' or 'list' type, depending on what cursor.execute()
accepts for the second argument (they vary).
preparer
a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to
quote identifiers.
supports_alter
``True`` if the database supports ``ALTER TABLE``.
max_identifier_length
The maximum length of identifier names.
supports_unicode_statements
Indicate whether the DB-API can receive SQL statements as Python
unicode strings
supports_unicode_binds
Indicate whether the DB-API can receive string bind parameters
as Python unicode strings
supports_sane_rowcount
Indicate whether the dialect properly implements rowcount for
``UPDATE`` and ``DELETE`` statements.
supports_sane_multi_rowcount
Indicate whether the dialect properly implements rowcount for
``UPDATE`` and ``DELETE`` statements when executed via
executemany.
preexecute_autoincrement_sequences
True if 'implicit' primary key functions must be executed separately
in order to get their value. This is currently oriented towards
PostgreSQL.
implicit_returning
use RETURNING or equivalent during INSERT execution in order to load
newly generated primary keys and other column defaults in one execution,
which are then available via inserted_primary_key.
If an insert statement has returning() specified explicitly,
the "implicit" functionality is not used and inserted_primary_key
will not be available.
colspecs
A dictionary of TypeEngine classes from sqlalchemy.types mapped
to subclasses that are specific to the dialect class. This
dictionary is class-level only and is not accessed from the
dialect instance itself.
supports_default_values
Indicates if the construct ``INSERT INTO tablename DEFAULT
VALUES`` is supported
supports_sequences
Indicates if the dialect supports CREATE SEQUENCE or similar.
sequences_optional
If True, indicates if the "optional" flag on the Sequence() construct
should signal to not generate a CREATE SEQUENCE. Applies only to
dialects that support sequences. Currently used only to allow PostgreSQL
SERIAL to be used on a column that specifies Sequence() for usage on
other backends.
supports_native_enum
Indicates if the dialect supports a native ENUM construct.
This will prevent types.Enum from generating a CHECK
constraint when that type is used.
supports_native_boolean
Indicates if the dialect supports a native boolean construct.
This will prevent types.Boolean from generating a CHECK
constraint when that type is used.
dbapi_exception_translation_map
A dictionary of names that will contain as values the names of
pep-249 exceptions ("IntegrityError", "OperationalError", etc)
keyed to alternate class names, to support the case where a
DBAPI has exception classes that aren't named as they are
referred to (e.g. IntegrityError = MyException). In the vast
majority of cases this dictionary is empty.
.. versionadded:: 1.0.5
"""
_has_events = False
def create_connect_args(self, url):
"""Build DB-API compatible connection arguments.
Given a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple
consisting of a `*args`/`**kwargs` suitable to send directly
to the dbapi's connect function.
"""
raise NotImplementedError()
@classmethod
def type_descriptor(cls, typeobj):
"""Transform a generic type to a dialect-specific type.
Dialect classes will usually use the
:func:`.types.adapt_type` function in the types module to
accomplish this.
The returned result is cached *per dialect class* so can
contain no dialect-instance state.
"""
raise NotImplementedError()
def initialize(self, connection):
"""Called during strategized creation of the dialect with a
connection.
Allows dialects to configure options based on server version info or
other properties.
The connection passed here is a SQLAlchemy Connection object,
with full capabilities.
The initialize() method of the base dialect should be called via
super().
"""
pass
def reflecttable(
self, connection, table, include_columns, exclude_columns):
"""Load table description from the database.
Given a :class:`.Connection` and a
:class:`~sqlalchemy.schema.Table` object, reflect its columns and
properties from the database.
The implementation of this method is provided by
:meth:`.DefaultDialect.reflecttable`, which makes use of
:class:`.Inspector` to retrieve column information.
Dialects should **not** seek to implement this method, and should
instead implement individual schema inspection operations such as
:meth:`.Dialect.get_columns`, :meth:`.Dialect.get_pk_constraint`,
etc.
"""
raise NotImplementedError()
def get_columns(self, connection, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a :class:`.Connection`, a string
`table_name`, and an optional string `schema`, return column
information as a list of dictionaries with these keys:
name
the column's name
type
[sqlalchemy.types#TypeEngine]
nullable
boolean
default
the column's default value
autoincrement
boolean
sequence
a dictionary of the form
{'name' : str, 'start' :int, 'increment': int, 'minvalue': int,
'maxvalue': int, 'nominvalue': bool, 'nomaxvalue': bool,
'cycle': bool, 'cache': int, 'order': bool}
Additional column attributes may be present.
"""
raise NotImplementedError()
def get_primary_keys(self, connection, table_name, schema=None, **kw):
"""Return information about primary keys in `table_name`.
Deprecated. This method is only called by the default
implementation of :meth:`.Dialect.get_pk_constraint`. Dialects should
instead implement the :meth:`.Dialect.get_pk_constraint` method
directly.
"""
raise NotImplementedError()
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
"""Return information about the primary key constraint on
table_name`.
Given a :class:`.Connection`, a string
`table_name`, and an optional string `schema`, return primary
key information as a dictionary with these keys:
constrained_columns
a list of column names that make up the primary key
name
optional name of the primary key constraint.
"""
raise NotImplementedError()
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a :class:`.Connection`, a string
`table_name`, and an optional string `schema`, return foreign
key information as a list of dicts with these keys:
name
the constraint's name
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
"""
raise NotImplementedError()
def get_table_names(self, connection, schema=None, **kw):
"""Return a list of table names for `schema`."""
raise NotImplementedError()
def get_temp_table_names(self, connection, schema=None, **kw):
"""Return a list of temporary table names on the given connection,
if supported by the underlying backend.
"""
raise NotImplementedError()
def get_view_names(self, connection, schema=None, **kw):
"""Return a list of all view names available in the database.
schema:
Optional, retrieve names from a non-default schema.
"""
raise NotImplementedError()
def get_temp_view_names(self, connection, schema=None, **kw):
"""Return a list of temporary view names on the given connection,
if supported by the underlying backend.
"""
raise NotImplementedError()
def get_view_definition(self, connection, view_name, schema=None, **kw):
"""Return view definition.
Given a :class:`.Connection`, a string
`view_name`, and an optional string `schema`, return the view
definition.
"""
raise NotImplementedError()
def get_indexes(self, connection, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a :class:`.Connection`, a string
`table_name` and an optional string `schema`, return index
information as a list of dictionaries with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
"""
raise NotImplementedError()
def get_unique_constraints(
self, connection, table_name, schema=None, **kw):
r"""Return information about unique constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
unique constraint information as a list of dicts with these keys:
name
the unique constraint's name
column_names
list of column names in order
\**kw
other options passed to the dialect's get_unique_constraints()
method.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def get_check_constraints(
self, connection, table_name, schema=None, **kw):
r"""Return information about check constraints in `table_name`.
Given a string `table_name` and an optional string `schema`, return
check constraint information as a list of dicts with these keys:
name
the check constraint's name
sqltext
the check constraint's SQL expression
\**kw
other options passed to the dialect's get_check_constraints()
method.
.. versionadded:: 1.1.0
"""
raise NotImplementedError()
def get_table_comment(
self, connection, table_name, schema=None, **kw):
r"""Return the "comment" for the table identified by `table_name`.
Given a string `table_name` and an optional string `schema`, return
table comment information as a dictionary with this key:
text
text of the comment
Raises ``NotImplementedError`` for dialects that don't support
comments.
.. versionadded:: 1.2
"""
raise NotImplementedError()
def normalize_name(self, name):
"""convert the given name to lowercase if it is detected as
case insensitive.
this method is only used if the dialect defines
requires_name_normalize=True.
"""
raise NotImplementedError()
def denormalize_name(self, name):
"""convert the given name to a case insensitive identifier
for the backend if it is an all-lowercase name.
this method is only used if the dialect defines
requires_name_normalize=True.
"""
raise NotImplementedError()
def has_table(self, connection, table_name, schema=None):
"""Check the existence of a particular table in the database.
Given a :class:`.Connection` object and a string
`table_name`, return True if the given table (possibly within
the specified `schema`) exists in the database, False
otherwise.
"""
raise NotImplementedError()
def has_sequence(self, connection, sequence_name, schema=None):
"""Check the existence of a particular sequence in the database.
Given a :class:`.Connection` object and a string
`sequence_name`, return True if the given sequence exists in
the database, False otherwise.
"""
raise NotImplementedError()
def _get_server_version_info(self, connection):
"""Retrieve the server version info from the given connection.
This is used by the default implementation to populate the
"server_version_info" attribute and is called exactly
once upon first connect.
"""
raise NotImplementedError()
def _get_default_schema_name(self, connection):
"""Return the string name of the currently selected schema from
the given connection.
This is used by the default implementation to populate the
"default_schema_name" attribute and is called exactly
once upon first connect.
"""
raise NotImplementedError()
def do_begin(self, dbapi_connection):
"""Provide an implementation of ``connection.begin()``, given a
DB-API connection.
The DBAPI has no dedicated "begin" method and it is expected
that transactions are implicit. This hook is provided for those
DBAPIs that might need additional help in this area.
Note that :meth:`.Dialect.do_begin` is not called unless a
:class:`.Transaction` object is in use. The
:meth:`.Dialect.do_autocommit`
hook is provided for DBAPIs that need some extra commands emitted
after a commit in order to enter the next transaction, when the
SQLAlchemy :class:`.Connection` is used in its default "autocommit"
mode.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_rollback(self, dbapi_connection):
"""Provide an implementation of ``connection.rollback()``, given
a DB-API connection.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_commit(self, dbapi_connection):
"""Provide an implementation of ``connection.commit()``, given a
DB-API connection.
:param dbapi_connection: a DBAPI connection, typically
proxied within a :class:`.ConnectionFairy`.
"""
raise NotImplementedError()
def do_close(self, dbapi_connection):
"""Provide an implementation of ``connection.close()``, given a DBAPI
connection.
This hook is called by the :class:`.Pool` when a connection has been
detached from the pool, or is being returned beyond the normal
capacity of the pool.
.. versionadded:: 0.8
"""
raise NotImplementedError()
def create_xid(self):
"""Create a two-phase transaction ID.
This id will be passed to do_begin_twophase(),
do_rollback_twophase(), do_commit_twophase(). Its format is
unspecified.
"""
raise NotImplementedError()
def do_savepoint(self, connection, name):
"""Create a savepoint with the given name.
:param connection: a :class:`.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_rollback_to_savepoint(self, connection, name):
"""Rollback a connection to the named savepoint.
:param connection: a :class:`.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_release_savepoint(self, connection, name):
"""Release the named savepoint on a connection.
:param connection: a :class:`.Connection`.
:param name: savepoint name.
"""
raise NotImplementedError()
def do_begin_twophase(self, connection, xid):
"""Begin a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
"""
raise NotImplementedError()
def do_prepare_twophase(self, connection, xid):
"""Prepare a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
"""
raise NotImplementedError()
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
"""Rollback a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
:param is_prepared: whether or not
:meth:`.TwoPhaseTransaction.prepare` was called.
:param recover: if the recover flag was passed.
"""
raise NotImplementedError()
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
"""Commit a two phase transaction on the given connection.
:param connection: a :class:`.Connection`.
:param xid: xid
:param is_prepared: whether or not
:meth:`.TwoPhaseTransaction.prepare` was called.
:param recover: if the recover flag was passed.
"""
raise NotImplementedError()
def do_recover_twophase(self, connection):
"""Recover list of uncommitted prepared two phase transaction
identifiers on the given connection.
:param connection: a :class:`.Connection`.
"""
raise NotImplementedError()
def do_executemany(self, cursor, statement, parameters, context=None):
"""Provide an implementation of ``cursor.executemany(statement,
parameters)``."""
raise NotImplementedError()
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of ``cursor.execute(statement,
parameters)``."""
raise NotImplementedError()
def do_execute_no_params(self, cursor, statement, parameters,
context=None):
"""Provide an implementation of ``cursor.execute(statement)``.
The parameter collection should not be sent.
"""
raise NotImplementedError()
def is_disconnect(self, e, connection, cursor):
"""Return True if the given DB-API error indicates an invalid
connection"""
raise NotImplementedError()
def connect(self):
"""return a callable which sets up a newly created DBAPI connection.
The callable accepts a single argument "conn" which is the
DBAPI connection itself. It has no return value.
This is used to set dialect-wide per-connection options such as
isolation modes, unicode modes, etc.
If a callable is returned, it will be assembled into a pool listener
that receives the direct DBAPI connection, with all wrappers removed.
If None is returned, no listener will be generated.
"""
return None
def reset_isolation_level(self, dbapi_conn):
"""Given a DBAPI connection, revert its isolation to the default.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`.Connection` and
:class:`.Engine`
isolation level facilities; these APIs should be preferred for
most typical use cases.
.. seealso::
:meth:`.Connection.get_isolation_level` - view current level
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`.Connection` isolation level
:paramref:`.create_engine.isolation_level` -
set per :class:`.Engine` isolation level
"""
raise NotImplementedError()
def set_isolation_level(self, dbapi_conn, level):
"""Given a DBAPI connection, set its isolation level.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`.Connection` and
:class:`.Engine`
isolation level facilities; these APIs should be preferred for
most typical use cases.
.. seealso::
:meth:`.Connection.get_isolation_level` - view current level
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`.Connection` isolation level
:paramref:`.create_engine.isolation_level` -
set per :class:`.Engine` isolation level
"""
raise NotImplementedError()
def get_isolation_level(self, dbapi_conn):
"""Given a DBAPI connection, return its isolation level.
When working with a :class:`.Connection` object, the corresponding
DBAPI connection may be procured using the
:attr:`.Connection.connection` accessor.
Note that this is a dialect-level method which is used as part
of the implementation of the :class:`.Connection` and
:class:`.Engine` isolation level facilities;
these APIs should be preferred for most typical use cases.
.. seealso::
:meth:`.Connection.get_isolation_level` - view current level
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.Connection.execution_options.isolation_level` -
set per :class:`.Connection` isolation level
:paramref:`.create_engine.isolation_level` -
set per :class:`.Engine` isolation level
"""
raise NotImplementedError()
@classmethod
def get_dialect_cls(cls, url):
"""Given a URL, return the :class:`.Dialect` that will be used.
This is a hook that allows an external plugin to provide functionality
around an existing dialect, by allowing the plugin to be loaded
from the url based on an entrypoint, and then the plugin returns
the actual dialect to be used.
By default this just returns the cls.
.. versionadded:: 1.0.3
"""
return cls
@classmethod
def engine_created(cls, engine):
"""A convenience hook called before returning the final :class:`.Engine`.
If the dialect returned a different class from the
:meth:`.get_dialect_cls`
method, then the hook is called on both classes, first on
the dialect class returned by the :meth:`.get_dialect_cls` method and
then on the class on which the method was called.
The hook should be used by dialects and/or wrappers to apply special
events to the engine or its components. In particular, it allows
a dialect-wrapping class to apply dialect-level events.
.. versionadded:: 1.0.3
"""
pass
class CreateEnginePlugin(object):
"""A set of hooks intended to augment the construction of an
:class:`.Engine` object based on entrypoint names in a URL.
The purpose of :class:`.CreateEnginePlugin` is to allow third-party
systems to apply engine, pool and dialect level event listeners without
the need for the target application to be modified; instead, the plugin
names can be added to the database URL. Target applications for
:class:`.CreateEnginePlugin` include:
* connection and SQL performance tools, e.g. which use events to track
number of checkouts and/or time spent with statements
* connectivity plugins such as proxies
Plugins are registered using entry points in a similar way as that
of dialects::
entry_points={
'sqlalchemy.plugins': [
'myplugin = myapp.plugins:MyPlugin'
]
A plugin that uses the above names would be invoked from a database
URL as in::
from sqlalchemy import create_engine
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?plugin=myplugin")
The ``plugin`` argument supports multiple instances, so that a URL
may specify multiple plugins; they are loaded in the order stated
in the URL::
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/"
"test?plugin=plugin_one&plugin=plugin_twp&plugin=plugin_three")
A plugin can receive additional arguments from the URL string as
well as from the keyword arguments passed to :func:`.create_engine`.
The :class:`.URL` object and the keyword dictionary are passed to the
constructor so that these arguments can be extracted from the url's
:attr:`.URL.query` collection as well as from the dictionary::
class MyPlugin(CreateEnginePlugin):
def __init__(self, url, kwargs):
self.my_argument_one = url.query.pop('my_argument_one')
self.my_argument_two = url.query.pop('my_argument_two')
self.my_argument_three = kwargs.pop('my_argument_three', None)
Arguments like those illustrated above would be consumed from the
following::
from sqlalchemy import create_engine
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/"
"test?plugin=myplugin&my_argument_one=foo&my_argument_two=bar",
my_argument_three='bat')
The URL and dictionary are used for subsequent setup of the engine
as they are, so the plugin can modify their arguments in-place.
Arguments that are only understood by the plugin should be popped
or otherwise removed so that they aren't interpreted as erroneous
arguments afterwards.
When the engine creation process completes and produces the
:class:`.Engine` object, it is again passed to the plugin via the
:meth:`.CreateEnginePlugin.engine_created` hook. In this hook, additional
changes can be made to the engine, most typically involving setup of
events (e.g. those defined in :ref:`core_event_toplevel`).
.. versionadded:: 1.1
"""
def __init__(self, url, kwargs):
"""Contruct a new :class:`.CreateEnginePlugin`.
The plugin object is instantiated individually for each call
to :func:`.create_engine`. A single :class:`.Engine` will be
passed to the :meth:`.CreateEnginePlugin.engine_created` method
corresponding to this URL.
:param url: the :class:`.URL` object. The plugin should inspect
what it needs here as well as remove its custom arguments from the
:attr:`.URL.query` collection. The URL can be modified in-place
in any other way as well.
:param kwargs: The keyword arguments passed to :func`.create_engine`.
The plugin can read and modify this dictionary in-place, to affect
the ultimate arguments used to create the engine. It should
remove its custom arguments from the dictionary as well.
"""
self.url = url
def handle_dialect_kwargs(self, dialect_cls, dialect_args):
"""parse and modify dialect kwargs"""
def handle_pool_kwargs(self, pool_cls, pool_args):
"""parse and modify pool kwargs"""
def engine_created(self, engine):
"""Receive the :class:`.Engine` object when it is fully constructed.
The plugin may make additional changes to the engine, such as
registering engine or connection pool events.
"""
class ExecutionContext(object):
"""A messenger object for a Dialect that corresponds to a single
execution.
ExecutionContext should have these data members:
connection
Connection object which can be freely used by default value
generators to execute SQL. This Connection should reference the
same underlying connection/transactional resources of
root_connection.
root_connection
Connection object which is the source of this ExecutionContext. This
Connection may have close_with_result=True set, in which case it can
only be used once.
dialect
dialect which created this ExecutionContext.
cursor
DB-API cursor procured from the connection,
compiled
if passed to constructor, sqlalchemy.engine.base.Compiled object
being executed,
statement
string version of the statement to be executed. Is either
passed to the constructor, or must be created from the
sql.Compiled object by the time pre_exec() has completed.
parameters
bind parameters passed to the execute() method. For compiled
statements, this is a dictionary or list of dictionaries. For
textual statements, it should be in a format suitable for the
dialect's paramstyle (i.e. dict or list of dicts for non
positional, list or list of lists/tuples for positional).
isinsert
True if the statement is an INSERT.
isupdate
True if the statement is an UPDATE.
should_autocommit
True if the statement is a "committable" statement.
prefetch_cols
a list of Column objects for which a client-side default
was fired off. Applies to inserts and updates.
postfetch_cols
a list of Column objects for which a server-side default or
inline SQL expression value was fired off. Applies to inserts
and updates.
"""
exception = None
"""A DBAPI-level exception that was caught when this ExecutionContext
attempted to execute a statement.
This attribute is meaningful only within the
:meth:`.ConnectionEvents.dbapi_error` event.
.. versionadded:: 0.9.7
.. seealso::
:attr:`.ExecutionContext.is_disconnect`
:meth:`.ConnectionEvents.dbapi_error`
"""
is_disconnect = None
"""Boolean flag set to True or False when a DBAPI-level exception
is caught when this ExecutionContext attempted to execute a statement.
This attribute is meaningful only within the
:meth:`.ConnectionEvents.dbapi_error` event.
.. versionadded:: 0.9.7
.. seealso::
:attr:`.ExecutionContext.exception`
:meth:`.ConnectionEvents.dbapi_error`
"""
def create_cursor(self):
    """Produce and return a new cursor from this context's connection.

    Dialects may customize the plain ``connection.cursor()`` call;
    PostgreSQL, for example, can hand back a "server side" cursor.
    """
    raise NotImplementedError()
def pre_exec(self):
    """Hook invoked just before a compiled statement executes.

    When a compiled construct was handed to this ExecutionContext,
    the `statement` and `parameters` members must be populated by
    the time this hook returns.
    """
    raise NotImplementedError()
def post_exec(self):
    """Hook invoked right after a compiled statement has executed.

    When a compiled construct was handed to this ExecutionContext,
    members such as `last_insert_ids` and `last_inserted_params`
    should be available once this hook returns.
    """
    raise NotImplementedError()
def result(self):
    """Produce the result object for this ExecutionContext.

    The return value is a ResultProxy.
    """
    raise NotImplementedError()
def handle_dbapi_exception(self, e):
    """Accept a DBAPI exception raised during execute, result fetch,
    or a similar operation."""
    raise NotImplementedError()
def should_autocommit_text(self, statement):
    """Inspect the given textual statement; return True when it refers
    to a "committable" statement."""
    raise NotImplementedError()
def lastrow_has_defaults(self):
    """Return True when the most recent INSERT or UPDATE row carried
    inlined or database-side default values.
    """
    raise NotImplementedError()
def get_rowcount(self):
    """Fetch the DBAPI ``cursor.rowcount`` value, or in some cases an
    interpreted value.

    See :attr:`.ResultProxy.rowcount` for the details.
    """
    raise NotImplementedError()
class Connectable(object):
    """Interface for an object that can execute SQL constructs.

    :class:`.Connection` and :class:`.Engine` are the two concrete
    implementations of :class:`.Connectable`.

    Implementations must additionally expose a 'dialect' member that
    references a :class:`.Dialect` instance.
    """

    def connect(self, **kwargs):
        """Produce a :class:`.Connection` object.

        Depending on context this may be ``self``, when this object is
        already a :class:`.Connection`, or a newly procured
        :class:`.Connection` when this object is an :class:`.Engine`.
        """

    def contextual_connect(self):
        """Produce a :class:`.Connection` that may take part in an
        ongoing context.

        Depending on context this may be ``self``, when this object is
        already a :class:`.Connection`, or a newly procured
        :class:`.Connection` when this object is an :class:`.Engine`.
        """
        raise NotImplementedError()

    @util.deprecated("0.7",
                     "Use the create() method on the given schema "
                     "object directly, i.e. :meth:`.Table.create`, "
                     ":meth:`.Index.create`, :meth:`.MetaData.create_all`")
    def create(self, entity, **kwargs):
        """Emit CREATE statements for the given schema entity."""
        raise NotImplementedError()

    @util.deprecated("0.7",
                     "Use the drop() method on the given schema "
                     "object directly, i.e. :meth:`.Table.drop`, "
                     ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`")
    def drop(self, entity, **kwargs):
        """Emit DROP statements for the given schema entity."""
        raise NotImplementedError()

    def execute(self, object, *multiparams, **params):
        """Execute the given construct, producing a :class:`.ResultProxy`."""
        raise NotImplementedError()

    def scalar(self, object, *multiparams, **params):
        """Execute and return the first column of the first row.

        The underlying cursor is closed once execution completes.
        """
        raise NotImplementedError()

    def _run_visitor(self, visitorcallable, element, **kwargs):
        raise NotImplementedError()

    def _execute_clauseelement(self, elem, multiparams=None, params=None):
        raise NotImplementedError()
class ExceptionContext(object):
    """Carry information about an error condition in progress.

    This object exists only to be handed to the
    :meth:`.ConnectionEvents.handle_error` event, providing an
    interface that can be extended without breaking backwards
    compatibility.

    .. versionadded:: 0.9.7
    """

    # The connection active at the time of the error; absent only for
    # first-connect failures.
    connection = None
    """The :class:`.Connection` that was in use during the exception.

    This member is present except when the failure occurred while
    first connecting.

    .. seealso::

        :attr:`.ExceptionContext.engine`
    """

    # The engine is available even when no connection could be made.
    engine = None
    """The :class:`.Engine` that was in use during the exception.

    This member should always be present, including in the case of a
    failure when first connecting.

    .. versionadded:: 1.0.0
    """

    cursor = None
    """The DBAPI cursor object.

    May be None.
    """

    statement = None
    """String SQL statement that was emitted directly to the DBAPI.

    May be None.
    """

    parameters = None
    """Parameter collection that was emitted directly to the DBAPI.

    May be None.
    """

    original_exception = None
    """The exception object that was caught.

    This member is always present.
    """

    sqlalchemy_exception = None
    """The :class:`sqlalchemy.exc.StatementError` wrapping the original,
    raised unless exception handling is circumvented by the event.

    May be None, since not every exception type is wrapped by
    SQLAlchemy.  For DBAPI-level exceptions subclassing the dbapi's
    Error class, this field is always present.
    """

    chained_exception = None
    """The exception returned by the previous handler in the exception
    chain, if any.

    When present, this exception is the one ultimately raised by
    SQLAlchemy unless a later handler replaces it.

    May be None.
    """

    execution_context = None
    """The :class:`.ExecutionContext` for the execution operation in
    progress.

    Present for statement execution operations, but not for operations
    such as transaction begin/end.  Also absent when the exception was
    raised before the :class:`.ExecutionContext` could be constructed.

    Note that :attr:`.ExceptionContext.statement` and
    :attr:`.ExceptionContext.parameters` may hold values different
    from those of the :class:`.ExecutionContext`, such as when a
    :meth:`.ConnectionEvents.before_cursor_execute` event or similar
    modified the statement/parameters to be sent.

    May be None.
    """

    # Handlers may assign to this flag to force or prevent connection
    # invalidation.
    is_disconnect = None
    """Whether the exception as it occurred represents a "disconnect"
    condition.

    Within the scope of the :meth:`.ConnectionEvents.handle_error`
    handler this flag is always True or False.

    SQLAlchemy defers to this flag when deciding whether the
    connection should subsequently be invalidated.  That is, assigning
    to this flag can invoke or prevent a "disconnect" event that would
    otherwise lead to connection and pool invalidation.
    """

    # Opt-out switch for whole-pool invalidation on disconnect.
    invalidate_pool_on_disconnect = True
    """Whether all connections in the pool should be invalidated when a
    "disconnect" condition is in effect.

    Setting this flag to False within the scope of the
    :meth:`.ConnectionEvents.handle_error` event means the full
    collection of pooled connections is not invalidated during a
    disconnect; only the current connection that is the subject of the
    error is actually invalidated.

    This flag supports custom disconnect-handling schemes where the
    invalidation of other pooled connections is performed based on
    other conditions, or even per connection.

    .. versionadded:: 1.0.3
    """
| 31.790895
| 81
| 0.665324
|
27db26e3f14b195f96ff70eca888b1ed2bf89363
| 3,042
|
py
|
Python
|
project/parsers/price.py
|
nikolay-py/product_optimizer
|
3d7da484984e63791849ce8a12b285a1ba2daacf
|
[
"MIT"
] | null | null | null |
project/parsers/price.py
|
nikolay-py/product_optimizer
|
3d7da484984e63791849ce8a12b285a1ba2daacf
|
[
"MIT"
] | 1
|
2021-07-05T13:42:19.000Z
|
2021-07-05T14:14:44.000Z
|
project/parsers/price.py
|
nikolay-py/product_optimizer
|
3d7da484984e63791849ce8a12b285a1ba2daacf
|
[
"MIT"
] | null | null | null |
"""We fill the base with products."""
from typing import Union
import bs4
from .crud import create_goods
def get_catalog_name(soup: bs4) -> Union[str, bool]:
    """Return the lower-cased catalog name, or ``False`` when missing."""
    # The current breadcrumb item carries the catalog title.
    crumb = soup.find(
        'li', class_='breadcrumb-page__item breadcrumb-page__current')
    if crumb is None:
        # No breadcrumb on the page -> catalog has no usable name.
        print('Каталог без имени')
        return False
    return crumb.text.strip().lower()
def get_price_per_kg(price: str, weight: str) -> Union[str, bool]:
    """Return the price per kilogram as a 2-decimal string, or ``False``
    when the weight is empty or zero."""
    # Scraped weights may be empty strings or "0", so guard the division.
    try:
        per_kg = float(price) / float(weight)
        return f"{per_kg:.2f}"
    except (ValueError, ZeroDivisionError) as e:
        print('-------------------------------------------------------')
        print(f"Ошибка пустой строки {e}. Цена {price}, Вес{weight}")
        return False
def get_float_price(str_price: str) -> str:
    """Extract the digits from a free-form price string and return the
    price formatted with two decimals."""
    # Keep only numeric characters; the trailing two digits are kopecks,
    # hence the division by 100.
    digits = ''.join(ch for ch in str_price if ch.isnumeric())
    return f"{float(digits) / 100:.2f}"
def get_price(soup: bs4) -> None:
    """Collect price data for every product on the page and persist it.

    For each product card the title, price, weight and unit are
    extracted, a derived price-per-kg value is added, and the record
    is stored via ``create_goods``.  Products with an unusable weight
    or an unnamed catalog are reported and skipped.
    """
    # Dictionary holding one product's data at a time.
    items = {}
    catalog_name = get_catalog_name(soup)
    # Find all product cards (possibly including extras) and iterate.
    for item in soup.find_all('div', class_='product ok-theme'):
        # Product title.
        item_title = item.find('div', class_='product-name').find('a')['title'].lower()
        # Price: prefer the promo label when present, otherwise the
        # regular price node.
        if item.find('span', class_='price label'):
            search = item.find('span', class_='price label').text
            item_price = get_float_price(search)
        else:
            search = item.find('div', class_='product-price').find('span').text
            item_price = get_float_price(search)
        # Weight and unit of measure; AttributeError means the card has
        # no weight block, so fall back to "per piece".
        try:
            product_weight = item.find('div', class_='product-weight')
            units = product_weight.find('span').text.strip()
            # Remove the unit <span> from the tree so that only the
            # numeric weight text remains.
            product_weight.find('span').extract()
            weight = product_weight.text.strip().replace(',', '.')
        except AttributeError:
            units = 'шт'
            weight = 1
        # Price per kg (False when the weight is empty or zero).
        price_per_kg = get_price_per_kg(item_price, weight)
        if price_per_kg is not False and catalog_name is not False:
            # Assemble the record for this product.
            items = {
                'key': catalog_name,
                'name': item_title,
                'price': float(item_price),
                'weight': weight,
                'units': units,
                'price_per_kg': price_per_kg
            }
            # Persist the record to the database.
            create_goods(items)
        else:
            print(f"{catalog_name} - {item_title} не учтена")
            print('-------------------------------------------------------')
| 34.179775
| 87
| 0.571006
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.