Column schema (types and observed value ranges; ⌀ marks nullable columns):

| column | type | lengths / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

Each record below is shown as a single metadata row (columns in the order above, through the fork-event dates), followed by the file `content`, followed by a closing row with `avg_line_length | max_line_length | alphanum_fraction`.
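The snippet below is an illustrative sketch, not part of the dump, of how records with this column schema could be iterated using the Hugging Face `datasets` library; the parquet path is a placeholder for wherever the shards actually live.

```python
from datasets import load_dataset

# Hypothetical location of the parquet shards backing this table.
ds = load_dataset(
    "parquet",
    data_files="data/python/*.parquet",
    split="train",
    streaming=True,  # iterate lazily instead of downloading the full split
)

for row in ds.take(2):
    # Each row is a dict keyed by the columns in the schema table above.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:120])
```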
| f0e63c97d6fcc120f5d220c8bb7de4c5493386dc | 3,787 | py | Python | scripts/test.py | MrGemy95/pytorch-SelectiveNet | 50540c433e235d3e3824d4799fd61869f512f723 | ["MIT"] | 13 | 2020-03-06T04:54:22.000Z | 2022-03-29T11:33:24.000Z | scripts/test.py | MrGemy95/pytorch-SelectiveNet | 50540c433e235d3e3824d4799fd61869f512f723 | ["MIT"] | 1 | 2021-04-13T12:31:15.000Z | 2021-04-13T12:31:15.000Z | scripts/test.py | MrGemy95/pytorch-SelectiveNet | 50540c433e235d3e3824d4799fd61869f512f723 | ["MIT"] | 7 | 2020-03-02T15:20:12.000Z | 2022-01-23T07:59:57.000Z |
import os
import sys
base = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../')
sys.path.append(base)
import click
from collections import OrderedDict
import torch
import torchvision
from external.dada.flag_holder import FlagHolder
from external.dada.metric import MetricDict
from external.dada.io import print_metric_dict
from external.dada.io import save_model
from external.dada.io import load_model
from external.dada.logger import Logger
from selectivenet.vgg_variant import vgg16_variant
from selectivenet.model import SelectiveNet
from selectivenet.loss import SelectiveLoss
from selectivenet.data import DatasetBuilder
from selectivenet.evaluator import Evaluator
# options
@click.command()
# model
@click.option('--dim_features', type=int, default=512)
@click.option('--dropout_prob', type=float, default=0.3)
@click.option('-w', '--weight', type=str, required=True, help='model weight path')
# data
@click.option('-d', '--dataset', type=str, required=True)
@click.option('--dataroot', type=str, default='/home/gatheluck/Scratch/selectivenet/data', help='path to dataset root')
@click.option('-j', '--num_workers', type=int, default=8)
@click.option('-N', '--batch_size', type=int, default=128)
@click.option('--normalize', is_flag=True, default=True)
# loss
@click.option('--coverage', type=float, required=True)
@click.option('--alpha', type=float, default=0.5, help='balancing parameter between selective_loss and ce_loss')
def main(**kwargs):
test(**kwargs)
def test(**kwargs):
FLAGS = FlagHolder()
FLAGS.initialize(**kwargs)
FLAGS.summary()
# dataset
dataset_builder = DatasetBuilder(name=FLAGS.dataset, root_path=FLAGS.dataroot)
test_dataset = dataset_builder(train=False, normalize=FLAGS.normalize)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=FLAGS.batch_size, shuffle=False, num_workers=FLAGS.num_workers, pin_memory=True)
# model
features = vgg16_variant(dataset_builder.input_size, FLAGS.dropout_prob).cuda()
model = SelectiveNet(features, FLAGS.dim_features, dataset_builder.num_classes).cuda()
load_model(model, FLAGS.weight)
if torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model)
# loss
base_loss = torch.nn.CrossEntropyLoss(reduction='none')
SelectiveCELoss = SelectiveLoss(base_loss, coverage=FLAGS.coverage)
# pre epoch
test_metric_dict = MetricDict()
# test
with torch.autograd.no_grad():
for i, (x,t) in enumerate(test_loader):
model.eval()
x = x.to('cuda', non_blocking=True)
t = t.to('cuda', non_blocking=True)
# forward
out_class, out_select, out_aux = model(x)
# compute selective loss
loss_dict = OrderedDict()
# loss_dict includes 'empirical_risk' / 'empirical_coverage' / 'penalty'
selective_loss, loss_dict = SelectiveCELoss(out_class, out_select, t)
selective_loss *= FLAGS.alpha
loss_dict['selective_loss'] = selective_loss.detach().cpu().item()
# compute standard cross entropy loss
ce_loss = torch.nn.CrossEntropyLoss()(out_aux, t)
ce_loss *= (1.0 - FLAGS.alpha)
loss_dict['ce_loss'] = ce_loss.detach().cpu().item()
# total loss
loss = selective_loss + ce_loss
loss_dict['loss'] = loss.detach().cpu().item()
# evaluation
evaluator = Evaluator(out_class.detach(), t.detach(), out_select.detach())
loss_dict.update(evaluator())
test_metric_dict.update(loss_dict)
# post epoch
print_metric_dict(None, None, test_metric_dict.avg, mode='test')
return test_metric_dict.avg
if __name__ == '__main__':
main()
| 36.066667 | 154 | 0.693425 |
| c5dc75a9336259e8a465110d81fadc9d20408ac5 | 2,201 | py | Python | detection/show.py | cititude/Media-and-Cognition-Homework | dabaaef6d8ec115171e7115731c5f76b518d9bde | ["MIT"] | null | null | null | detection/show.py | cititude/Media-and-Cognition-Homework | dabaaef6d8ec115171e7115731c5f76b518d9bde | ["MIT"] | null | null | null | detection/show.py | cititude/Media-and-Cognition-Homework | dabaaef6d8ec115171e7115731c5f76b518d9bde | ["MIT"] | null | null | null |
import torch
import json
from utils.utils import *
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import random  # used below for default box colors (unless already provided by the star import)
from PIL import Image  # used below to open test images (unless already provided by the star import)
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
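# Illustrative usage (not part of the original file; values are hypothetical):
#   img = cv2.imread("example.jpg")
#   plot_one_box([50, 60, 200, 220], img, color=(0, 0, 255), label="car 0.91", line_thickness=2)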
def main():
if not os.path.exists("imgs"):
os.mkdir("imgs")
jsonfile=json.load(open("filter_unique_pred17.json"))
imgs=jsonfile["imgs"]
for idx in imgs:
img=imgs[idx]
bboxes=img["objects"]
output=[]
mosaic = np.full((2048,2048, 3), 255, dtype=np.uint8)
# Fix class - colour map
prop_cycle = plt.rcParams['axes.prop_cycle']
hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]
img=np.array(Image.open(os.path.join("../image_exp/Detection/test",idx+".jpg")).convert('RGB'))
mosaic = img
if(len(bboxes)>8):
print(idx)
else:
continue
for bbox in bboxes:
box=np.array([bbox["bbox"]["xmin"],bbox["bbox"]["ymin"],bbox["bbox"]["xmax"],bbox["bbox"]["ymax"]])
class_=bbox["category"]
conf=bbox["score"]
color = color_lut[hash(class_) % len(color_lut)]
plot_one_box(box, mosaic, label=class_, color=color, line_thickness=2)
# Image border
cv2.rectangle(mosaic, (1, 1), (2047, 2047), (255, 255, 255), thickness=3)
cv2.imwrite("imgs/{}.jpg".format(idx), cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB),)
main()
| 36.683333 | 115 | 0.585189 |
| 79fe90bf1096715710cd8d51ad5c6b3e37b37163 | 2,601 | py | Python | gazoo_device/tests/unit_tests/esp32_pigweed_locking_test.py | google/gazoo-device | f333b386f5993c8d4c9e12c89ebb620a0c4f5506 | ["Apache-2.0"] | 14 | 2020-11-05T23:23:32.000Z | 2022-03-01T18:59:29.000Z | gazoo_device/tests/unit_tests/esp32_pigweed_locking_test.py | google/gazoo-device | f333b386f5993c8d4c9e12c89ebb620a0c4f5506 | ["Apache-2.0"] | null | null | null | gazoo_device/tests/unit_tests/esp32_pigweed_locking_test.py | google/gazoo-device | f333b386f5993c8d4c9e12c89ebb620a0c4f5506 | ["Apache-2.0"] | 5 | 2021-05-20T22:52:51.000Z | 2022-02-21T08:46:21.000Z |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for device class ESP32PigweedLocking."""
from unittest import mock
import gazoo_device
from gazoo_device.auxiliary_devices import esp32_pigweed_locking
from gazoo_device.tests.unit_tests.utils import fake_device_test_case
_PWRPC_COMMON_CLASS = (
gazoo_device.capabilities.pwrpc_common_default.PwRPCCommonDefault)
_FAKE_FW_VERSION = "0"
class ESP32PigweedLockingTests(fake_device_test_case.FakeDeviceTestCase):
"""Test module for device class ESP32PigweedLocking."""
def setUp(self):
super().setUp()
self.setup_fake_device_requirements("esp32pigweedlocking-1234")
self.device_config["persistent"]["console_port_name"] = "/dev/bus/usb/01/02"
self.uut = esp32_pigweed_locking.ESP32PigweedLocking(
self.mock_manager,
self.device_config,
log_directory=self.artifacts_directory)
@mock.patch.object(
_PWRPC_COMMON_CLASS, "software_version", new_callable=mock.PropertyMock)
def test_001_firmware_version(self, mock_fw_version):
"""Verifies the firmware_version."""
mock_fw_version.return_value = _FAKE_FW_VERSION
self.assertEqual(_FAKE_FW_VERSION, self.uut.firmware_version)
self.assertEqual(1, mock_fw_version.call_count)
@mock.patch.object(_PWRPC_COMMON_CLASS, "reboot")
def test_002_reboot(self, mock_reboot):
"""Verifies the reboot method."""
self.uut.reboot()
self.assertEqual(1, mock_reboot.call_count)
@mock.patch.object(_PWRPC_COMMON_CLASS, "factory_reset")
def test_003_factory_reset(self, mock_factory_reset):
"""Verifies the factory_reset method."""
self.uut.factory_reset()
self.assertEqual(1, mock_factory_reset.call_count)
def test_004_common_rpc_capability(self):
"""Verify the initialization of pw_rpc_common capability."""
self.assertTrue(self.uut.pw_rpc_common)
def test_005_locking_rpc_capability(self):
"""Verifies the initialization of pw_rpc_lock capability."""
self.assertTrue(self.uut.pw_rpc_lock)
if __name__ == "__main__":
fake_device_test_case.main()
| 36.633803 | 80 | 0.772011 |
| 2a0b876d49420fc816cc67f8c26adaa6ca1b83a6 | 682 | py | Python | ctapipe/flow/algorithms/pair.py | mpecimotika/ctapipe | ffd7930921f7139b761fbf1208da16dd302e97a6 | ["BSD-3-Clause"] | null | null | null | ctapipe/flow/algorithms/pair.py | mpecimotika/ctapipe | ffd7930921f7139b761fbf1208da16dd302e97a6 | ["BSD-3-Clause"] | null | null | null | ctapipe/flow/algorithms/pair.py | mpecimotika/ctapipe | ffd7930921f7139b761fbf1208da16dd302e97a6 | ["BSD-3-Clause"] | null | null | null |
from ctapipe.core import Component
from time import sleep
class Pair(Component):
"""`Pair` class represents a Stage for the pipeline.
It returns received value to Add stage except when
received value is a multiple of 5. In this case it returns
received value to Inverse stage
"""
def init(self):
self.log.debug("--- Pair init ---")
return True
def run(self, _input):
sleep(.5)
self.log.debug("Pair receive {}".format(_input))
if _input % 5 == 0:
return (_input, 'Inverse')
else:
return (_input, 'Add')
def finish(self):
self.log.debug("--- Pair finish ---")
pass
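# Illustrative routing (not part of the original file): run(10) returns
# (10, 'Inverse') because 10 is a multiple of 5, while run(7) returns (7, 'Add').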
| 25.259259 | 62 | 0.589443 |
| a394b5ddaffc919cf3b20b9d9b979e7fcfe4b583 | 866 | py | Python | setup.py | gigapixel/robotframework-cylon | 909cfef10dcca42958f220481f3e51ac1398d74e | ["MIT"] | null | null | null | setup.py | gigapixel/robotframework-cylon | 909cfef10dcca42958f220481f3e51ac1398d74e | ["MIT"] | null | null | null | setup.py | gigapixel/robotframework-cylon | 909cfef10dcca42958f220481f3e51ac1398d74e | ["MIT"] | null | null | null |
from setuptools import setup
setup(name='robotframework-cylon',
version='0.0.1',
description='',
url='https://github.com/gigapixel',
author='Peerapat S.',
author_email='gigapixel7@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing'
],
packages=['cylon_robot'],
# entry_points={
# 'console_scripts': [
# 'cylon=cylon.command:main', 'behack=cylon.behack:main'
# ]},
install_requires=[
'selenium'
],
zip_safe=False)
| 30.928571 | 71 | 0.571594 |
| f8e6083dbbaa37834dca61c4518f6c21ed728e2b | 21,170 | py | Python | pudb/settings.py | mlubimow/pudb | b36b10e8905971c700ddb6b41bf6102ea0a310c4 | ["MIT"] | null | null | null | pudb/settings.py | mlubimow/pudb | b36b10e8905971c700ddb6b41bf6102ea0a310c4 | ["MIT"] | null | null | null | pudb/settings.py | mlubimow/pudb | b36b10e8905971c700ddb6b41bf6102ea0a310c4 | ["MIT"] | 1 | 2021-05-13T13:15:47.000Z | 2021-05-13T13:15:47.000Z |
__copyright__ = """
Copyright (C) 2009-2017 Andreas Kloeckner
Copyright (C) 2014-2017 Aaron Meurer
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import sys
from configparser import ConfigParser
from pudb.lowlevel import (lookup_module, get_breakpoint_invalid_reason,
settings_log)
# minor LGPL violation: stolen from python-xdg
_home = os.environ.get("HOME", None)
xdg_data_home = os.environ.get("XDG_DATA_HOME",
os.path.join(_home, ".local", "share") if _home else None)
XDG_CONFIG_HOME = os.environ.get("XDG_CONFIG_HOME",
os.path.join(_home, ".config") if _home else None)
if XDG_CONFIG_HOME:
XDG_CONFIG_DIRS = [XDG_CONFIG_HOME]
else:
XDG_CONFIG_DIRS = os.environ.get("XDG_CONFIG_DIRS", "/etc/xdg").split(":")
def get_save_config_path(*resource):
if XDG_CONFIG_HOME is None:
return None
if not resource:
resource = [XDG_CONF_RESOURCE]
# no idea what pylint's problem is here
resource = os.path.join(*resource) # pylint: disable=no-value-for-parameter
assert not resource.startswith("/")
path = os.path.join(XDG_CONFIG_HOME, resource)
if not os.path.isdir(path):
os.makedirs(path, 448) # 0o700
return path
# end LGPL violation
CONF_SECTION = "pudb"
XDG_CONF_RESOURCE = "pudb"
CONF_FILE_NAME = "pudb.cfg"
SAVED_BREAKPOINTS_FILE_NAME = "saved-breakpoints-%d.%d" % sys.version_info[:2]
BREAKPOINTS_FILE_NAME = "breakpoints-%d.%d" % sys.version_info[:2]
_config_ = [None]
def load_config():
# Only ever do this once
if _config_[0] is not None:
return _config_[0]
from os.path import join, isdir
cparser = ConfigParser()
conf_dict = {}
try:
cparser.read([
join(cdir, XDG_CONF_RESOURCE, CONF_FILE_NAME)
for cdir in XDG_CONFIG_DIRS if isdir(cdir)])
if cparser.has_section(CONF_SECTION):
conf_dict.update(dict(cparser.items(CONF_SECTION)))
except Exception:
settings_log.exception("Failed to load config")
conf_dict.setdefault("shell", "internal")
conf_dict.setdefault("theme", "classic")
conf_dict.setdefault("line_numbers", "False")
conf_dict.setdefault("seen_welcome", "a")
conf_dict.setdefault("sidebar_width", 0.5)
conf_dict.setdefault("variables_weight", 1)
conf_dict.setdefault("stack_weight", 1)
conf_dict.setdefault("breakpoints_weight", 1)
conf_dict.setdefault("current_stack_frame", "top")
conf_dict.setdefault("stringifier", "default")
conf_dict.setdefault("custom_theme", "")
conf_dict.setdefault("custom_stringifier", "")
conf_dict.setdefault("custom_shell", "")
conf_dict.setdefault("wrap_variables", "True")
conf_dict.setdefault("default_variables_access_level", "public")
conf_dict.setdefault("display", "auto")
conf_dict.setdefault("prompt_on_quit", "True")
conf_dict.setdefault("hide_cmdline_win", "False")
def normalize_bool_inplace(name):
try:
if conf_dict[name].lower() in ["0", "false", "off"]:
conf_dict[name] = False
else:
conf_dict[name] = True
except Exception:
settings_log.exception("Failed to process config")
normalize_bool_inplace("line_numbers")
normalize_bool_inplace("wrap_variables")
normalize_bool_inplace("prompt_on_quit")
normalize_bool_inplace("hide_cmdline_win")
_config_[0] = conf_dict
return conf_dict
def save_config(conf_dict):
from os.path import join
cparser = ConfigParser()
cparser.add_section(CONF_SECTION)
for key in sorted(conf_dict):
cparser.set(CONF_SECTION, key, str(conf_dict[key]))
try:
save_path = get_save_config_path()
if not save_path:
return
outf = open(join(save_path, CONF_FILE_NAME), "w")
cparser.write(outf)
outf.close()
except Exception:
settings_log.exception("Failed to save config")
def edit_config(ui, conf_dict):
import urwid
old_conf_dict = conf_dict.copy()
def _update_theme():
ui.setup_palette(ui.screen)
ui.screen.clear()
def _update_line_numbers():
for sl in ui.source:
sl._invalidate()
def _update_prompt_on_quit():
pass
def _update_hide_cmdline_win():
ui.update_cmdline_win()
def _update_current_stack_frame():
ui.update_stack()
def _update_stringifier():
import pudb.var_view
pudb.var_view.custom_stringifier_dict = {}
ui.update_var_view()
def _update_default_variables_access_level():
ui.update_var_view()
def _update_wrap_variables():
ui.update_var_view()
def _update_config(check_box, new_state, option_newvalue):
option, newvalue = option_newvalue
new_conf_dict = {option: newvalue}
if option == "theme":
# only activate if the new state of the radio button is 'on'
if new_state:
if newvalue is None:
# Select the custom theme entry dialog
lb.set_focus(lb_contents.index(theme_edit_list_item))
return
conf_dict.update(theme=newvalue)
_update_theme()
elif option == "line_numbers":
new_conf_dict["line_numbers"] = not check_box.get_state()
conf_dict.update(new_conf_dict)
_update_line_numbers()
elif option == "prompt_on_quit":
new_conf_dict["prompt_on_quit"] = not check_box.get_state()
conf_dict.update(new_conf_dict)
_update_prompt_on_quit()
elif option == "hide_cmdline_win":
new_conf_dict["hide_cmdline_win"] = not check_box.get_state()
conf_dict.update(new_conf_dict)
_update_hide_cmdline_win()
elif option == "current_stack_frame":
# only activate if the new state of the radio button is 'on'
if new_state:
conf_dict.update(new_conf_dict)
_update_current_stack_frame()
elif option == "stringifier":
# only activate if the new state of the radio button is 'on'
if new_state:
if newvalue is None:
lb.set_focus(lb_contents.index(stringifier_edit_list_item))
return
conf_dict.update(stringifier=newvalue)
_update_stringifier()
elif option == "default_variables_access_level":
# only activate if the new state of the radio button is 'on'
if new_state:
conf_dict.update(default_variables_access_level=newvalue)
_update_default_variables_access_level()
elif option == "wrap_variables":
new_conf_dict["wrap_variables"] = not check_box.get_state()
conf_dict.update(new_conf_dict)
_update_wrap_variables()
heading = urwid.Text("This is the preferences screen for PuDB. "
"Hit Ctrl-P at any time to get back to it.\n\n"
"Configuration settings are saved in "
"$HOME/.config/pudb or $XDG_CONFIG_HOME/pudb "
"environment variable. If both variables are not set "
"configurations settings will not be saved.\n")
cb_line_numbers = urwid.CheckBox("Show Line Numbers",
bool(conf_dict["line_numbers"]), on_state_change=_update_config,
user_data=("line_numbers", None))
cb_prompt_on_quit = urwid.CheckBox("Prompt before quitting",
bool(conf_dict["prompt_on_quit"]), on_state_change=_update_config,
user_data=("prompt_on_quit", None))
hide_cmdline_win = urwid.CheckBox("Hide command line (Ctrl-X) window "
"when not in use",
bool(conf_dict["hide_cmdline_win"]), on_state_change=_update_config,
user_data=("hide_cmdline_win", None))
# {{{ shells
shell_info = urwid.Text("This is the shell that will be "
"used when you hit '!'.\n")
shells = ["internal", "classic", "ipython", "ipython_kernel", "bpython",
"ptpython", "ptipython"]
known_shell = conf_dict["shell"] in shells
shell_edit = urwid.Edit(edit_text=conf_dict["custom_shell"])
shell_edit_list_item = urwid.AttrMap(shell_edit, "value")
shell_rb_group = []
shell_rbs = [
urwid.RadioButton(shell_rb_group, name,
conf_dict["shell"] == name)
for name in shells]+[
urwid.RadioButton(shell_rb_group, "Custom:",
not known_shell, on_state_change=_update_config,
user_data=("shell", None)),
shell_edit_list_item,
urwid.Text("\nTo use a custom shell, see example-shell.py "
"in the pudb distribution. Enter the full path to a "
"file like it in the box above. '~' will be expanded "
"to your home directory. The file should contain a "
"function called pudb_shell(_globals, _locals) "
"at the module level. See the PuDB documentation for "
"more information."),
]
# }}}
# {{{ themes
from pudb.theme import THEMES
known_theme = conf_dict["theme"] in THEMES
theme_rb_group = []
theme_edit = urwid.Edit(edit_text=conf_dict["custom_theme"])
theme_edit_list_item = urwid.AttrMap(theme_edit, "value")
theme_rbs = [
urwid.RadioButton(theme_rb_group, name,
conf_dict["theme"] == name, on_state_change=_update_config,
user_data=("theme", name))
for name in THEMES]+[
urwid.RadioButton(theme_rb_group, "Custom:",
not known_theme, on_state_change=_update_config,
user_data=("theme", None)),
theme_edit_list_item,
urwid.Text("\nTo use a custom theme, see example-theme.py in the "
"pudb distribution. Enter the full path to a file like it in "
"the box above. '~' will be expanded to your home directory. "
"Note that a custom theme will not be applied until you close "
"this dialog."),
]
# }}}
# {{{ stack
stack_rb_group = []
stack_opts = ["top", "bottom"]
stack_info = urwid.Text("Show the current stack frame at the\n")
stack_rbs = [
urwid.RadioButton(stack_rb_group, name,
conf_dict["current_stack_frame"] == name,
on_state_change=_update_config,
user_data=("current_stack_frame", name))
for name in stack_opts
]
# }}}
# {{{ stringifier
from pudb.var_view import STRINGIFIERS
stringifier_opts = list(STRINGIFIERS.keys())
known_stringifier = conf_dict["stringifier"] in stringifier_opts
stringifier_rb_group = []
stringifier_edit = urwid.Edit(edit_text=conf_dict["custom_stringifier"])
stringifier_info = urwid.Text(
"This is the default function that will be called on variables in the "
"variables list. You can also change this on a per-variable basis by "
"selecting a variable and typing 'e' to edit the variable's display "
"settings, or by typing one of d/t/r/s/i/c. Note that str and repr will "
"be slower than the default, type, or id stringifiers.\n")
stringifier_edit_list_item = urwid.AttrMap(stringifier_edit, "value")
stringifier_rbs = [
urwid.RadioButton(stringifier_rb_group, name,
conf_dict["stringifier"] == name,
on_state_change=_update_config,
user_data=("stringifier", name))
for name in stringifier_opts
]+[
urwid.RadioButton(stringifier_rb_group, "Custom:",
not known_stringifier, on_state_change=_update_config,
user_data=("stringifier", None)),
stringifier_edit_list_item,
urwid.Text("\nTo use a custom stringifier, see "
"example-stringifier.py in the pudb distribution. Enter the "
"full path to a file like it in the box above. "
"'~' will be expanded to your home directory. "
"The file should contain a function called pudb_stringifier() "
"at the module level, which should take a single argument and "
"return the desired string form of the object passed to it. "
"Note that if you choose a custom stringifier, the variables "
"view will not be updated until you close this dialog."),
]
# }}}
# {{{ variables access level
default_variables_access_level_opts = ["public", "private", "all"]
default_variables_access_level_rb_group = []
default_variables_access_level_info = urwid.Text(
"Set the default attribute visibility "
"of variables in the variables list.\n"
"\nNote that you can change this option on "
"a per-variable basis by selecting the "
"variable and pressing '*'.")
default_variables_access_level_rbs = [
urwid.RadioButton(default_variables_access_level_rb_group, name,
conf_dict["default_variables_access_level"] == name,
on_state_change=_update_config,
user_data=("default_variables_access_level", name))
for name in default_variables_access_level_opts
]
# }}}
# {{{ wrap variables
cb_wrap_variables = urwid.CheckBox("Wrap variables",
bool(conf_dict["wrap_variables"]), on_state_change=_update_config,
user_data=("wrap_variables", None))
wrap_variables_info = urwid.Text("\nNote that you can change this option on "
"a per-variable basis by selecting the "
"variable and pressing 'w'.")
# }}}
# {{{ display
display_info = urwid.Text("What driver is used to talk to your terminal. "
"'raw' has the most features (colors and highlighting), "
"but is only correct for "
"XTerm and terminals like it. 'curses' "
"has fewer "
"features, but it will work with just about any terminal. 'auto' "
"will attempt to pick between the two based on availability and "
"the $TERM environment variable.\n\n"
"Changing this setting requires a restart of PuDB.")
displays = ["auto", "raw", "curses"]
display_rb_group = []
display_rbs = [
urwid.RadioButton(display_rb_group, name,
conf_dict["display"] == name)
for name in displays]
# }}}
lb_contents = (
[heading]
+ [urwid.AttrMap(urwid.Text("General:\n"), "group head")]
+ [cb_line_numbers]
+ [cb_prompt_on_quit]
+ [hide_cmdline_win]
+ [urwid.AttrMap(urwid.Text("\nShell:\n"), "group head")]
+ [shell_info]
+ shell_rbs
+ [urwid.AttrMap(urwid.Text("\nTheme:\n"), "group head")]
+ theme_rbs
+ [urwid.AttrMap(urwid.Text("\nStack Order:\n"), "group head")]
+ [stack_info]
+ stack_rbs
+ [urwid.AttrMap(urwid.Text("\nVariable Stringifier:\n"), "group head")]
+ [stringifier_info]
+ stringifier_rbs
+ [urwid.AttrMap(urwid.Text("\nVariables Attribute Visibility:\n"),
"group head")]
+ [default_variables_access_level_info]
+ default_variables_access_level_rbs
+ [urwid.AttrMap(urwid.Text("\nWrap Variables:\n"), "group head")]
+ [cb_wrap_variables]
+ [wrap_variables_info]
+ [urwid.AttrMap(urwid.Text("\nDisplay driver:\n"), "group head")]
+ [display_info]
+ display_rbs
)
lb = urwid.ListBox(urwid.SimpleListWalker(lb_contents))
if ui.dialog(lb, [
("OK", True),
("Cancel", False),
],
title="Edit Preferences"):
# Only update the settings here that instant-apply (above) doesn't take
# care of.
# if we had a custom theme, it wasn't updated live
if theme_rb_group[-1].state:
newvalue = theme_edit.get_edit_text()
conf_dict.update(theme=newvalue, custom_theme=newvalue)
_update_theme()
# Ditto for custom stringifiers
if stringifier_rb_group[-1].state:
newvalue = stringifier_edit.get_edit_text()
conf_dict.update(stringifier=newvalue, custom_stringifier=newvalue)
_update_stringifier()
if shell_rb_group[-1].state:
newvalue = shell_edit.get_edit_text()
conf_dict.update(shell=newvalue, custom_shell=newvalue)
else:
for shell, shell_rb in zip(shells, shell_rbs):
if shell_rb.get_state():
conf_dict["shell"] = shell
for display, display_rb in zip(displays, display_rbs):
if display_rb.get_state():
conf_dict["display"] = display
else: # The user chose cancel, revert changes
conf_dict.update(old_conf_dict)
_update_theme()
# _update_line_numbers() is equivalent to _update_theme()
_update_current_stack_frame()
_update_stringifier()
# {{{ breakpoint saving
def parse_breakpoints(lines):
# b [ (filename:lineno | function) [, "condition"] ]
breakpoints = []
for arg in lines:
if not arg:
continue
arg = arg[1:]
filename = None
lineno = None
cond = None
comma = arg.find(",")
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
colon = arg.rfind(":")
funcname = None
if colon > 0:
filename = arg[:colon].strip()
f = lookup_module(filename)
if not f:
continue
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError:
continue
else:
continue
if get_breakpoint_invalid_reason(filename, lineno) is None:
breakpoints.append((filename, lineno, False, cond, funcname))
return breakpoints
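# Illustrative saved-breakpoint lines matching the grammar above (paths and
# conditions are hypothetical):
#   b /home/user/project/app.py:42
#   b /home/user/project/app.py:57, count > 3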
def get_breakpoints_file_name():
from os.path import join
save_path = get_save_config_path()
if not save_path:
return None
else:
return join(save_path, SAVED_BREAKPOINTS_FILE_NAME)
def load_breakpoints():
"""
Load and check saved breakpoints from the config files.
Returns: list of tuples
"""
from os.path import join, isdir
file_names = []
for cdir in XDG_CONFIG_DIRS:
if isdir(cdir):
for name in [SAVED_BREAKPOINTS_FILE_NAME, BREAKPOINTS_FILE_NAME]:
file_names.append(join(cdir, XDG_CONF_RESOURCE, name))
lines = []
for fname in file_names:
try:
rc_file = open(fname)
except OSError:
pass
else:
lines.extend([line.strip() for line in rc_file.readlines()])
rc_file.close()
return parse_breakpoints(lines)
def save_breakpoints(bp_list):
"""
:arg bp_list: a list of `bdb.Breakpoint` objects
"""
save_path = get_breakpoints_file_name()
if not save_path:
return
histfile = open(get_breakpoints_file_name(), "w")
bp_list = {(bp.file, bp.line, bp.cond) for bp in bp_list}
for bp in bp_list:
line = "b %s:%d" % (bp[0], bp[1])
if bp[2]:
line += ", %s" % bp[2]
line += "\n"
histfile.write(line)
histfile.close()
# }}}
# vim:foldmethod=marker
| 34.311183 | 84 | 0.611242 |
| 0c6aa0b1bb393485828a6ca1c2e9dc2570518096 | 153 | py | Python | news/admin.py | alcibiadesBustillo/resumen-news | 62f2cf821e619703e058d2b996ebf2513854bb73 | ["MIT"] | 1 | 2021-07-09T07:17:17.000Z | 2021-07-09T07:17:17.000Z | news/admin.py | alcibiadesBustillo/resumen-news | 62f2cf821e619703e058d2b996ebf2513854bb73 | ["MIT"] | null | null | null | news/admin.py | alcibiadesBustillo/resumen-news | 62f2cf821e619703e058d2b996ebf2513854bb73 | ["MIT"] | null | null | null |
from django.contrib import admin
from .models import News
# Register your models here.
@admin.register(News)
class NewsAdmin(admin.ModelAdmin):
pass
| 21.857143 | 34 | 0.784314 |
| 37d7ee2c1e9b74419a6e83877774e41c506ea88a | 1,019 | py | Python | src/Reset Ease Automatically/__init__.py | RisingOrange/Reset-Ease-Automatically | 7c2fd16b7cac32ba499d87f681c75cfcfb617405 | ["MIT"] | 5 | 2020-09-06T10:51:39.000Z | 2021-11-11T01:46:06.000Z | src/Reset Ease Automatically/__init__.py | RisingOrange/Reset-Ease-Automatically | 7c2fd16b7cac32ba499d87f681c75cfcfb617405 | ["MIT"] | 6 | 2020-09-06T11:28:47.000Z | 2021-06-13T00:22:03.000Z | src/Reset Ease Automatically/__init__.py | RisingOrange/Reset-Ease-Automatically | 7c2fd16b7cac32ba499d87f681c75cfcfb617405 | ["MIT"] | 1 | 2020-09-06T10:52:43.000Z | 2020-09-06T10:52:43.000Z |
from anki.hooks import addHook
from aqt import mw
from aqt.utils import tooltip
from PyQt5.QtWidgets import *
from . import preferences_dialog
from .reset_ease import reset_ease
from .store_restore_ease import add_deck_options
def main():
setup_toolbar_menu()
addHook('unloadProfile', reset_ease_and_show_message)
addHook('showDeckOptions', add_deck_options)
mw.addonManager.setConfigAction(__name__, preferences_dialog.show)
def setup_toolbar_menu():
# Add "reset ease" submenu
reset_ease_menu = QMenu("Reset Ease", mw)
mw.form.menuTools.addMenu(reset_ease_menu)
# Add Preferences button
a = QAction('&Preferences', mw)
a.triggered.connect(preferences_dialog.show)
reset_ease_menu.addAction(a)
# Reset Ease button
a = QAction('&Reset Ease', mw)
a.triggered.connect(reset_ease_and_show_message)
reset_ease_menu.addAction(a)
def reset_ease_and_show_message():
reset_ease()
tooltip("Ease factors have been reset", period=1200)
main()
| 25.475 | 70 | 0.747792 |
| c95539e24d49b17eb6f1423afa669e17b4fcd237 | 29,280 | py | Python | tagger.py | sjyttkl/convseg | 4cd970283e128be38c1588820f741d781b709511 | ["MIT"] | 1 | 2018-12-10T02:20:38.000Z | 2018-12-10T02:20:38.000Z | tagger.py | sjyttkl/convseg | 4cd970283e128be38c1588820f741d781b709511 | ["MIT"] | null | null | null | tagger.py | sjyttkl/convseg | 4cd970283e128be38c1588820f741d781b709511 | ["MIT"] | null | null | null |
# from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.layers as layers
import tensorflow.contrib.crf as crf
import time
import codecs
import os
import pickle
import numpy as np
# from itertools import izip
INT_TYPE = np.int32
FLOAT_TYPE = np.float32
################################################################################
# Model #
################################################################################
class Model(object):
def __init__(self, scope, sess):
self.scope = scope
self.sess = sess
# Build the input graph.
def build_input_graph(self, vocab_size, emb_size, word_vocab_size, word_emb_size, word_window_size):
"""
Gather embeddings from lookup tables.
"""
seq_ids = tf.placeholder(dtype=INT_TYPE, shape=[None, None], name='seq_ids')
seq_word_ids = [tf.placeholder(dtype=INT_TYPE, shape=[None, None], name='seq_feature_%d_ids' % i)
for i in range(word_window_size)]
embeddings = tf.get_variable('embeddings', [vocab_size, emb_size])
embedding_output = tf.nn.embedding_lookup([embeddings], seq_ids)
word_outputs = []
word_embeddings = tf.get_variable('word_embeddings', [word_vocab_size, word_emb_size])
for i in range(word_window_size):
word_outputs.append(tf.nn.embedding_lookup([word_embeddings], seq_word_ids[i]))
return seq_ids, seq_word_ids, tf.concat([embedding_output] + word_outputs, 2, 'inputs')
def build_tagging_graph(self, inputs, hidden_layers, channels, num_tags, use_crf, lamd, dropout_emb,
dropout_hidden, kernel_size, use_bn, use_wn, active_type):
"""
Build a deep neural model for sequence tagging.
"""
stag_ids = tf.placeholder(dtype=INT_TYPE, shape=[None, None], name='stag_ids')
seq_lengths = tf.placeholder(dtype=INT_TYPE, shape=[None], name='seq_lengths')
# Default is not train.
is_train = tf.placeholder(dtype=tf.bool, shape=[], name='is_train')
masks = tf.cast(tf.sequence_mask(seq_lengths), FLOAT_TYPE)
# Dropout on embedding output.
if dropout_emb:
inputs = tf.cond(is_train,
lambda: tf.nn.dropout(inputs, 1 - dropout_emb),
lambda: inputs)  # tf.cond acts as an if/else here
hidden_output = inputs
pre_channels = inputs.get_shape()[-1].value
for i in range(hidden_layers):
k = kernel_size
cur_channels = channels[i]
filter_w = tf.get_variable('filter_w_%d' % i, shape=[k, pre_channels, cur_channels], dtype=FLOAT_TYPE)
filter_v = tf.get_variable('filter_v_%d' % i, shape=[k, pre_channels, cur_channels], dtype=FLOAT_TYPE)
bias_b = tf.get_variable('bias_b_%d' % i, shape=[cur_channels],
initializer=tf.zeros_initializer(dtype=FLOAT_TYPE))
bias_c = tf.get_variable('bias_c_%d' % i, shape=[cur_channels],
initializer=tf.zeros_initializer(dtype=FLOAT_TYPE))
# Weight normalization.
if use_wn:
epsilon = 1e-12
g_w = tf.get_variable('g_w_%d' % i, shape=[k, 1, cur_channels], dtype=FLOAT_TYPE)
g_v = tf.get_variable('g_v_%d' % i, shape=[k, 1, cur_channels], dtype=FLOAT_TYPE)
# Perform wn
filter_w = g_w * filter_w / (tf.sqrt(tf.reduce_sum(filter_w ** 2, 1, keep_dims=True)) + epsilon)
filter_v = g_v * filter_v / (tf.sqrt(tf.reduce_sum(filter_v ** 2, 1, keep_dims=True)) + epsilon)
w = tf.nn.conv1d(hidden_output, filter_w, 1, 'SAME') + bias_b
v = tf.nn.conv1d(hidden_output, filter_v, 1, 'SAME') + bias_c
if use_bn:
w = layers.batch_norm(inputs=v, decay=0.9, is_training=is_train, center=True, scale=True,
scope='BatchNorm_w_%d' % i)
v = layers.batch_norm(inputs=w, decay=0.9, is_training=is_train, center=True, scale=True,
scope='BatchNorm_v_%d' % i)
if active_type == 'glu':
hidden_output = w * tf.nn.sigmoid(v)
elif active_type == 'relu':
hidden_output = tf.nn.relu(w)
elif active_type == 'gtu':
hidden_output = tf.tanh(w) * tf.nn.sigmoid(v)
elif active_type == 'tanh':
hidden_output = tf.tanh(w)
elif active_type == 'linear':
hidden_output = w
elif active_type == 'bilinear':
hidden_output = w * v
# Mask paddings.
hidden_output = hidden_output * tf.expand_dims(masks, -1)
# Dropout on hidden output.
if dropout_hidden:
hidden_output = tf.cond(is_train,
lambda: tf.nn.dropout(hidden_output, 1 - dropout_hidden),
lambda: hidden_output
)
pre_channels = cur_channels
# Un-scaled log probabilities.
scores = layers.fully_connected(hidden_output, num_tags, tf.identity)
if use_crf:
cost, transitions = crf.crf_log_likelihood(inputs=scores, tag_indices=stag_ids,
sequence_lengths=seq_lengths)
cost = - tf.reduce_mean(cost)
else:
reshaped_scores = tf.reshape(scores, [-1, num_tags])
reshaped_stag_ids = tf.reshape(stag_ids, [-1])
real_distribution = layers.one_hot_encoding(reshaped_stag_ids, num_tags)
cost = tf.nn.softmax_cross_entropy_with_logits(reshaped_scores, real_distribution)
cost = tf.reduce_sum(tf.reshape(cost, tf.shape(stag_ids)) * masks) / tf.cast(tf.shape(inputs)[0],
FLOAT_TYPE)
# Calculate L2 penalty.
l2_penalty = 0
if lamd > 0:
for v in tf.trainable_variables():
if '/B:' not in v.name and '/biases:' not in v.name:
l2_penalty += lamd * tf.nn.l2_loss(v)
train_cost = cost + l2_penalty
# Summary cost.
tf.summary.scalar('cost', cost)
summaries = tf.summary.merge_all()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
updates = tf.group(*update_ops)
with tf.control_dependencies([updates]):
cost = tf.identity(cost)
return stag_ids, seq_lengths, is_train, cost, train_cost, scores, summaries
def build_graph(self):
parameters = self.parameters
with tf.variable_scope(name_or_scope=self.scope, initializer=tf.uniform_unit_scaling_initializer()):
seq_ids_pl, seq_other_ids_pls, inputs = self.build_input_graph(vocab_size=parameters['vocab_size'],
emb_size=parameters['emb_size'],
word_window_size=parameters['word_window_size'],
word_vocab_size=parameters['word_vocab_size'],
word_emb_size=parameters['word_emb_size'])
stag_ids_pl, seq_lengths_pl, is_train_pl, cost_op, train_cost_op, scores_op, summary_op = \
self.build_tagging_graph(inputs=inputs,
num_tags=parameters['num_tags'],
use_crf=parameters['use_crf'],
lamd=parameters['lamd'],
dropout_emb=parameters['dropout_emb'],
dropout_hidden=parameters['dropout_hidden'],
hidden_layers=parameters['hidden_layers'],
channels=parameters['channels'],
kernel_size=parameters['kernel_size'],
use_bn=parameters['use_bn'],
use_wn=parameters['use_wn'],
active_type=parameters['active_type'])
self.seq_ids_pl = seq_ids_pl
self.seq_other_ids_pls = seq_other_ids_pls
self.stag_ids_pl = stag_ids_pl
self.seq_lengths_pl = seq_lengths_pl
self.is_train_pl = is_train_pl
self.cost_op = cost_op
self.train_cost_op = train_cost_op
self.scores_op = scores_op
self.summary_op = summary_op
def inference(self, scores, sequence_lengths=None):
"""
Inference label sequence given scores.
If transitions are given, perform Viterbi search; otherwise perform greedy search.
Args:
scores: A numpy array with shape (batch, max_length, num_tags).
sequence_lengths: A numpy array with shape (batch,).
Returns:
A numpy array with shape (batch, max_length).
"""
if not self.parameters['use_crf']:
return np.argmax(scores, 2)
else:
with tf.variable_scope(self.scope, reuse=True):
transitions = tf.get_variable('transitions').eval(session=self.sess)
paths = np.zeros(scores.shape[:2], dtype=INT_TYPE)
for i in range(scores.shape[0]):
tag_score, length = scores[i], sequence_lengths[i]
if length == 0:
continue
path, _ = crf.viterbi_decode(tag_score[:length], transitions)
paths[i, :length] = path
return paths
def train(self, train_data, dev_data, test_data, model_dir, log_dir, emb_size, word_emb_size, optimizer,
hidden_layers, channels, kernel_size, active_type, use_bn, use_wn, use_crf, lamd, dropout_emb,
dropout_hidden, evaluator, batch_size, eval_batch_size, pre_trained_emb_path, fix_word_emb,
reserve_all_word_emb, pre_trained_word_emb_path, max_epoches, print_freq):
"""
This function is the main function for preparing data and training the model.
"""
assert len(channels) == hidden_layers
# Parse optimization method and parameters.
optimizer = optimizer.split('_')
optimizer_name = optimizer[0]
optimizer_options = [eval(i) for i in optimizer[1:]]
optimizer = {
'sgd': tf.train.GradientDescentOptimizer,
'adadelta': tf.train.AdadeltaOptimizer,
'adam': tf.train.AdamOptimizer,
'mom': tf.train.MomentumOptimizer
}[optimizer_name](*optimizer_options)  # instantiate the optimizer directly; optimizer_options is a list, so it is unpacked with *
print('Preparing data...', end='')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
mappings_path = os.path.join(model_dir, 'mappings.pkl')
parameters_path = os.path.join(model_dir, 'parameters.pkl')
# Load character embeddings.
pre_trained = {}
if pre_trained_emb_path and os.path.isfile(pre_trained_emb_path):  # pre-trained character embedding model
for l in codecs.open(pre_trained_emb_path, 'r', 'utf8'):
we = l.split()
if len(we) == emb_size + 1:
w, e = we[0], np.array(list(map(float, we[1:])))  # list() is needed on Python 3, where map() returns an iterator
pre_trained[w] = e
# Load word embeddings.
pre_trained_word = {}
if pre_trained_word_emb_path and os.path.isfile(pre_trained_word_emb_path):  # pre-trained word embedding model
for l in codecs.open(pre_trained_word_emb_path, 'r', 'utf8', 'ignore'):
we = l.split()
if len(we) == word_emb_size + 1:
w, e = we[0], np.array(list(map(float, we[1:])))  # list() is needed on Python 3, where map() returns an iterator
pre_trained_word[w] = e
# Load or create mappings.
if os.path.isfile(mappings_path):
print(mappings_path)
b = open(mappings_path, 'r',encoding="utf-8")
print(b,type(b))
item2id, id2item, tag2id, id2tag, word2id, id2word = pickle.load(open(mappings_path, 'rb'))  # must be opened in 'rb' mode on Python 3
else:
# train_data [([word],[word]),([tag],[tag])]
# print(train_data,type(train_data))
b = create_dic(train_data[0], add_unk=True, add_pad=True)  # returns the frequency of each character as a dict
item2id, id2item = create_mapping(b)  # character <-> id mapping
tag2id, id2tag = create_mapping(create_dic(train_data[-1]))  # tag <-> id mapping
# The following block of code is a bit odd.
words = []
for t in train_data[1:-1]:
words.extend(t)
for t in dev_data[1:-1]:
words.extend(t)
for t in test_data[1:-1]:
words.extend(t)
word_dic = create_dic(words, add_unk=True, add_pad=True)
for k in list(word_dic.keys()):  # iterate over a copy; popping while iterating the dict view raises RuntimeError on Python 3
if k not in pre_trained_word and k != '<UNK>' and k != '<PAD>':
word_dic.pop(k)
if reserve_all_word_emb:
for w in pre_trained_word:
if w not in word_dic:
word_dic[w] = 0
word2id, id2word = create_mapping(word_dic)
# Save the mappings to disk.
pickle.dump((item2id, id2item, tag2id, id2tag, word2id, id2word), open(mappings_path, 'wb'))  # must be opened in 'wb' mode on Python 3
# Hyper parameters.
word_window_size = len(train_data) - 2
parameters = {
'vocab_size': len(item2id),
'emb_size': emb_size,
'word_window_size': word_window_size,
'word_vocab_size': len(word2id),
'word_emb_size': word_emb_size,
'hidden_layers': hidden_layers,
'channels': channels,
'kernel_size': kernel_size,
'use_bn': use_bn,
'use_wn': use_wn,
'num_tags': len(tag2id),
'use_crf': use_crf,
'lamd': lamd,
'dropout_emb': dropout_emb,
'dropout_hidden': dropout_hidden,
'active_type': active_type
}
if os.path.isfile(parameters_path):
parameters_old = pickle.load(open(parameters_path, 'rb'))
if parameters != parameters_old:
raise Exception('Network parameters are not consistent!')
else:
pickle.dump(parameters, open(parameters_path, 'wb'))
self.item2id = item2id
self.id2item = id2item
self.tag2id = tag2id
self.id2tag = id2tag
self.word2id = word2id
self.id2word = id2word
self.parameters = parameters
# Convert data to corresponding ids.
train_data_ids = data_to_ids(
train_data, [item2id] + [word2id] * word_window_size + [tag2id]
)
print('Finished.')
print("Start building the network...", end='')
self.build_graph()
print('Finished.')
def summary(name, dtype=FLOAT_TYPE):
value = tf.placeholder(dtype, shape=[])
return value, tf.summary.scalar(name, value)
dev_f1_pl, dev_summary_op = summary('dev f1')
test_f1_pl, test_summary_op = summary('test f1')
# Clip gradients and apply.
grads_and_vars = optimizer.compute_gradients(loss=self.train_cost_op, var_list=tf.trainable_variables())
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
# If use fixed word embeddings, remove the grad
if fix_word_emb:
grads_and_vars = [(g, v) for g, v in grads_and_vars if '/word_embeddings' not in v.name]
grads_summary_op = tf.summary.histogram('grads', tf.concat([tf.reshape(g, [-1]) for g, _ in grads_and_vars], 0))
grads_norm = tf.sqrt(sum([tf.reduce_sum(tf.pow(g, 2)) for g, _ in grads_and_vars]))
grads_and_vars = [(g / (tf.reduce_max([grads_norm, 5]) / 5), v) for g, v in grads_and_vars]
train_op = optimizer.apply_gradients(grads_and_vars)
# Variables for recording training procedure.
best_epoch = tf.get_variable('best_epoch', shape=[], initializer=tf.zeros_initializer(), trainable=False,
dtype=INT_TYPE)
best_step = tf.get_variable('best_step', shape=[], initializer=tf.zeros_initializer(), trainable=False,
dtype=INT_TYPE)
best_dev_score = tf.get_variable('best_dev_score', shape=[], initializer=tf.zeros_initializer(),
trainable=False, dtype=FLOAT_TYPE)
best_test_score = tf.get_variable('best_test_score', shape=[], initializer=tf.zeros_initializer(),
trainable=False, dtype=FLOAT_TYPE)
init_op = tf.global_variables_initializer()
saver = tf.train.Saver(tf.global_variables())
summary_writer = tf.summary.FileWriter(log_dir + '/summaries')
print('Finished.')
print('Start training the network...')
self.sess.run(init_op)
start_time_begin = time.time()
try:
checkpoint = tf.train.latest_checkpoint(model_dir)
saver.restore(self.sess, checkpoint)
print('Restore model from %s.' % checkpoint)
except (tf.errors.DataLossError, TypeError, Exception):
# Failed to restore model from disk. Load pre-trained embeddings.
# Load character embeddings.
with tf.variable_scope(self.scope, reuse=True):
embeddings = tf.get_variable('embeddings')
value = self.sess.run(embeddings)
count = 0
for item in item2id:
item_id = item2id[item]
if item in pre_trained:
value[item_id] = pre_trained[item]
count += 1
# Run assign op.
self.sess.run(embeddings.assign(value))
del (pre_trained)
print('%d of %d character embeddings were loaded from pre-trained.' % (count, len(item2id)))
# Load word embeddings.
with tf.variable_scope(self.scope, reuse=True):
word_embeddings = tf.get_variable('word_embeddings')
value = self.sess.run(word_embeddings)
count = 0
for item in word2id:
item_id = word2id[item]
if item in pre_trained_word:
value[item_id] = pre_trained_word[item]
count += 1
# Run assign op.
self.sess.run(word_embeddings.assign(value))
del (pre_trained_word)
print('%d of %d word embeddings were loaded from pre-trained.' % (count, len(word2id)))
start_epoch, global_step, best_dev_f1 = self.sess.run((best_epoch, best_step, best_dev_score))
for epoch in range(start_epoch + 1, max_epoches + 1):
print('Starting epoch %d...' % epoch)
start_time = time.time()
loss_ep = 0
n_step = 0
iterator = data_iterator(train_data_ids, batch_size, shuffle=True)
for batch in iterator:
batch = create_input(batch)
seq_ids, seq_other_ids_list, stag_ids, seq_lengths = batch[0], batch[1: -2], batch[-2], batch[-1]
feed_dict = {self.seq_ids_pl: seq_ids.astype(INT_TYPE),
self.stag_ids_pl: stag_ids.astype(INT_TYPE),
self.seq_lengths_pl: seq_lengths.astype(INT_TYPE),
self.is_train_pl: True}
assert len(self.seq_other_ids_pls) == len(seq_other_ids_list)
for pl, v in zip(self.seq_other_ids_pls, seq_other_ids_list):
feed_dict[pl] = v
# feed_dict.update(drop_feed_dict) # enable noise input
loss, summaries, grads_summaries, _ = self.sess.run(
[self.cost_op, self.summary_op, grads_summary_op, train_op],
feed_dict=feed_dict)
loss_ep += loss
n_step += 1
global_step += 1
summary_writer.add_summary(summaries, global_step)
summary_writer.add_summary(grads_summaries, global_step)
# Show training information.
if global_step % print_freq == 0:
print(' Step %d, current cost %.6f, average cost %.6f' % (global_step, loss, loss_ep / n_step))
loss_ep = loss_ep / n_step
print('Epoch %d finished. Time: %ds Cost: %.6f' % (epoch, time.time() - start_time, loss_ep))
# Evaluate precision, recall and f1 with an external script.
dev_pre, dev_rec, dev_f1 = \
evaluator((dev_data[0], dev_data[-1], self.tag_all(dev_data[:-1], eval_batch_size)[1]),
log_dir + '/dev', epoch)
test_pre, test_rec, test_f1 = \
evaluator((test_data[0], test_data[-1], self.tag_all(test_data[:-1], eval_batch_size)[1]),
log_dir + '/test', epoch)
# Summary dev and test F1 score.
summary_writer.add_summary(self.sess.run(dev_summary_op, {dev_f1_pl: dev_f1}), epoch)
summary_writer.add_summary(self.sess.run(test_summary_op, {test_f1_pl: test_f1}), epoch)
print("Dev precision / recall / f1 score: %.2f / %.2f / %.2f" %
(dev_pre * 100, dev_rec * 100, dev_f1 * 100))
print("Test precision / recall / f1 score: %.2f / %.2f / %.2f" %
(test_pre * 100, test_rec * 100, test_f1 * 100))
if dev_f1 > best_dev_f1:
best_dev_f1 = dev_f1
self.sess.run((tf.assign(best_epoch, epoch),
tf.assign(best_dev_score, dev_f1),
tf.assign(best_test_score, test_f1),
tf.assign(best_step, global_step)))
path = saver.save(self.sess, model_dir + '/model', epoch)
print('New best score on dev.')
print('Save model at %s.' % path)
print('Finished.')
print('Total training time: %fs.' % (time.time() - start_time_begin))
def load_model(self, model_dir):
mappings_path = os.path.join(model_dir, 'mappings.pkl')
parameters_path = os.path.join(model_dir, 'parameters.pkl')
item2id, id2item, tag2id, id2tag, word2id, id2word = \
pickle.load(open(mappings_path, 'rb'))  # 'rb', consistent with how the mappings are saved above
parameters = pickle.load(open(parameters_path, 'rb'))
self.item2id = item2id
self.id2item = id2item
self.tag2id = tag2id
self.id2tag = id2tag
self.word2id = word2id
self.id2word = id2word
self.parameters = parameters
print(parameters)
print('Building input graph...', end='')
self.build_graph()
print('Finished.')
print('Initializing variables...', end='')
init_op = tf.initialize_all_variables()
self.sess.run(init_op)
print('Finished.')
print('Reloading parameters...', end='')
saver = tf.train.Saver(tf.global_variables())
checkpoint = tf.train.latest_checkpoint(model_dir)
saver.restore(self.sess, checkpoint)
print('Finished.')
def tag(self, data_iter):
"""A tagging function.
Args:
data_iter: A iterator for generate batches.
Returns:
A generator for tagging result.
"""
output = []
for data in data_iter:
batch = data_to_ids(data, [self.item2id] + [self.word2id] * self.parameters['word_window_size'])
batch = create_input(batch)
seq_ids, seq_other_ids_list, seq_lengths = batch[0], batch[1: -1], batch[-1]
feed_dict = {self.seq_ids_pl: seq_ids.astype(INT_TYPE),
self.seq_lengths_pl: seq_lengths.astype(INT_TYPE),
self.is_train_pl: False}
for pl, v in zip(self.seq_other_ids_pls, seq_other_ids_list):
feed_dict[pl] = v.astype(INT_TYPE)
scores = self.sess.run(self.scores_op, feed_dict)
stag_ids = self.inference(scores, seq_lengths)
for seq, stag_id, length in zip(data[0], stag_ids, seq_lengths):
output.append((seq, [self.id2tag[t] for t in stag_id[:length]]))
yield zip(*output)
output = []
def tag_all(self, data, batch_size):
data_iter = data_iterator(data, batch_size=batch_size, shuffle=False)
output = []
for b in self.tag(data_iter):
output.extend(zip(*b))
return list(zip(*output))
################################################################################
# DATA UTILS #
################################################################################
# This function counts how often each character occurs.
def create_dic(item_list, add_unk=False, add_pad=False):
"""
Create a dictionary of items from a list of list of items.
"""
assert type(item_list) in (list, tuple)
dic = {}
for items in item_list:
for item in items:
if item not in dic:
dic[item] = 1
else:
dic[item] += 1
# Make sure that <PAD> have a id 0.
if add_pad:
dic['<PAD>'] = 1e20
# If specified, add a special item <UNK>.
if add_unk:
dic['<UNK>'] = 1e10
return dic
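# Worked example (illustrative): create_dic([['a', 'b', 'a']], add_unk=True, add_pad=True)
# returns {'a': 2, 'b': 1, '<PAD>': 1e20, '<UNK>': 1e10}, so <PAD> ends up with id 0
# and <UNK> with id 1 after create_mapping below.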
def create_mapping(items):
"""
Create a mapping (item to ID / ID to item) from a dictionary.
Items are ordered by decreasing frequency.
"""
if type(items) is dict:
sorted_items = sorted(items.items(), key=lambda x: (-x[1], x[0]))  # sort by frequency, descending; entries look like (item, frequency)
id2item = {i: v[0] for i, v in enumerate(sorted_items)}  # maps each id to its item, e.g. {0: '<PAD>', 1: '<UNK>', 2: ',', 3: '的', 4: '。'}
item2id = {v: k for k, v in id2item.items()}  # invert the mapping above: keys become values and values become keys
return item2id, id2item
elif type(items) is list:
id2item = {i: v for i, v in enumerate(items)}
item2id = {v: k for k, v in id2item.items()}
return item2id, id2item
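# Worked example (illustrative): create_mapping({'a': 3, 'b': 5}) returns
# item2id = {'b': 0, 'a': 1} and id2item = {0: 'b', 1: 'a'}, since items are
# ordered by decreasing frequency.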
def create_input(batch):
"""
Take each sentence data in batch and return an input for
the training or the evaluation function.
"""
assert len(batch) > 0
lengths = [len(seq) for seq in batch[0]]
max_len = max(2, max(lengths))
ret = []
for d in batch:
dd = []
for seq_id, pos in zip(d, lengths):
assert len(seq_id) == pos
pad = [0] * (max_len - pos)
dd.append(seq_id + pad)
ret.append(np.array(dd))
ret.append(np.array(lengths))
return ret
def data_to_ids(data, mappings):
"""
Map text data to ids.
"""
# convert full-width characters to half-width
def strQ2B(ustring):
rstring = ""
for uchar in ustring:
inside_code = ord(uchar)
if inside_code == 12288:
inside_code = 32
elif 65281 <= inside_code <= 65374:
inside_code -= 65248
rstring += chr(inside_code)
return rstring
# convert half-width characters to full-width
def strB2Q(ustring):
rstring = ""
for uchar in ustring:
inside_code = ord(uchar)
if inside_code == 32:
inside_code = 12288
elif 32 <= inside_code <= 126:
inside_code += 65248
rstring += chr(inside_code)
return rstring
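# Illustrative (not part of the original file): strQ2B('Ａ，１２３') == 'A,123'
# and strB2Q('A,123') == 'Ａ，１２３'; both forms are tried in map() below so
# lookups still hit the vocabulary when the corpus mixes character widths.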
def map(item, mapping):
if item in mapping:
return mapping[item]
item = strB2Q(item)
if item in mapping:
return mapping[item]
item = strQ2B(item)
if item in mapping:
return mapping[item]
return mapping['<UNK>']
def map_seq(seqsx, mapping):
return [[map(item, mapping) for item in seq] for seq in seqsx]
ret = []
for d, m in zip(data, mappings):
print(np.shape(data),np.shape(mappings))
print(np.shape(d),np.shape(m))
ret.append(map_seq(d, m))
return tuple(ret)
def data_iterator(inputs, batch_size, shuffle=True, max_length=200):
"""
A simple iterator for generating dynamic mini batches.
"""
assert len(inputs) > 0
assert all([len(item) == len(inputs[0]) for item in inputs])
inputs = list(zip(*inputs))
if shuffle:
np.random.shuffle(inputs)
batch = []
bs = batch_size
for d in inputs:
if len(d[0]) > max_length:
bs = max(1, min(batch_size * max_length / len(d[0]), bs))
if len(batch) < bs:
batch.append(d)
else:
yield list(zip(*batch))
batch = [d]
if len(d[0]) < max_length:
bs = batch_size
else:
bs = max(1, batch_size * max_length / len(d[0]))
if batch:
yield list(zip(*batch))
| 43.442136 | 131 | 0.555294 |
| 7352c08c3df705bbb0de6ece15eaa02958efb3e7 | 1,436 | py | Python | test/test_bad_request_source.py | hypostulate/mbta-api-client | f18903b6269c523c733a31574ff4579349fed3f8 | ["MIT"] | null | null | null | test/test_bad_request_source.py | hypostulate/mbta-api-client | f18903b6269c523c733a31574ff4579349fed3f8 | ["MIT"] | null | null | null | test/test_bad_request_source.py | hypostulate/mbta-api-client | f18903b6269c523c733a31574ff4579349fed3f8 | ["MIT"] | null | null | null |
# coding: utf-8
"""
MBTA
MBTA service API. https://www.mbta.com Source code: https://github.com/mbta/api # noqa: E501
The version of the OpenAPI document: 3.0
Contact: developer@mbta.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.bad_request_source import BadRequestSource # noqa: E501
from openapi_client.rest import ApiException
class TestBadRequestSource(unittest.TestCase):
"""BadRequestSource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test BadRequestSource
include_optional is a boolean: when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.bad_request_source.BadRequestSource() # noqa: E501
if include_optional :
return BadRequestSource(
parameter = 'sort'
)
else :
return BadRequestSource(
)
def testBadRequestSource(self):
"""Test BadRequestSource"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 26.592593 | 97 | 0.676184 |
| d00dc8f0343d56d319263b74102fea2cd36830c1 | 3,735 | py | Python | nbox/framework/__init__.py | NimbleBoxAI/aibox | 9d1f76dd6b5d1435dc3f351800153db76cc1369c | ["BSD-3-Clause"] | 9 | 2021-09-28T19:01:39.000Z | 2022-03-22T19:01:29.000Z | nbox/framework/__init__.py | NimbleBoxAI/nbox | 9d1f76dd6b5d1435dc3f351800153db76cc1369c | ["BSD-3-Clause"] | 2 | 2021-08-06T10:25:41.000Z | 2022-03-18T08:31:19.000Z | nbox/framework/__init__.py | NimbleBoxAI/nbox | 9d1f76dd6b5d1435dc3f351800153db76cc1369c | ["BSD-3-Clause"] | 4 | 2021-09-17T12:40:44.000Z | 2022-02-16T06:14:44.000Z |
r"""This submodule concerns itself with conversion of different framworks to other frameworks.
It achieves this by providing a fix set of functions for each framework. There are a couple of
caveats that the developer must know about.
1. We use joblib to serialize the model, see `reason <https://stackoverflow.com/questions/12615525/what-are-the-different-use-cases-of-joblib-versus-pickle>`_ \
so if you try to unpickle the model with ``pickle`` it will not work correctly and will throw the error
``_pickle.UnpicklingError: invalid load key, '\x00'``. So ensure that you use ``joblib``.
2. Serializing torch models directly is a bit tricky and weird, you can read more about it
`here <https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/docs/serialization.md>`_,
so technically pytorch torch.save() automatically pickles the object along with the required
datapoint (model hierarchy, constants, data, etc.)
Lazy Loading
------------
Lazy loading is a mechanism that allows you to load the model only when you need it. This is easier said than
done because you need to add many checks and balances at various locations in the code. The way it works here
is that we check, for a list of required modules, whether they are importable before exposing the corresponding part of the framework code.
Documentation
-------------
"""
# this function is for getting the meta data and is framework agnostic, so adding this in the
# __init__ of framework submodule
def get_meta(input_names, input_shapes, args, output_names, output_shapes, outputs):
"""Generic method to convert the inputs to get ``nbox_meta['metadata']`` dictionary"""
# get the meta object
def __get_struct(names_, shapes_, tensors_):
return {
name: {
"dtype": str(tensor.dtype),
"tensorShape": {"dim": [{"name": "", "size": x} for x in shapes], "unknownRank": False},
"name": name,
}
for name, shapes, tensor in zip(names_, shapes_, tensors_)
}
meta = {"inputs": __get_struct(input_names, input_shapes, args), "outputs": __get_struct(output_names, output_shapes, outputs)}
return meta
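# Hypothetical call sketch for get_meta (all names and shapes below are made
# up; any objects exposing a ``dtype`` attribute work as the tensor arguments):
#
#     import numpy
#     x = numpy.zeros((1, 3), dtype="float32")
#     y = numpy.zeros((1, 2), dtype="float32")
#     meta = get_meta(
#         input_names=["x"], input_shapes=[(1, 3)], args=[x],
#         output_names=["y"], output_shapes=[(1, 2)], outputs=[y],
#     )
#     # meta["inputs"]["x"]["tensorShape"]["dim"] == [{"name": "", "size": 1}, {"name": "", "size": 3}]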
__all__ = ["get_meta"]
from types import SimpleNamespace
from ..utils import _isthere, folder, join
def update_all_lazy_loading(*modules, fname):
    """Lazy load modules and update the ``__all__`` list.

    Note: ``eval`` cannot execute ``import`` or assignment statements, so the
    submodule is loaded with ``importlib`` instead.
    """
    import importlib
    global __all__
    if _isthere(*modules):
        # Import e.g. ".__pytorch" relative to this package and expose everything
        # its own __all__ lists under a SimpleNamespace named after the file.
        submodule = importlib.import_module(f".{fname}", package=__name__)
        maps = {x: getattr(submodule, x) for x in submodule.__all__}
        maps.update({"IMPORTS": [*modules]})
        _name_of_module = fname.strip("_")
        globals()[_name_of_module] = SimpleNamespace(**maps)
        __all__ += [_name_of_module]
# update_all_lazy_loading("torch", fname = "__pytorch")
# update_all_lazy_loading("sklearn", "sk2onnx", fname = "__sklearn")
_pt_modules = ["torch"]
if _isthere(*_pt_modules):
from .__pytorch import *
from .__pytorch import __all__ as _pt_all
maps = {f"{x}": globals()[x] for x in _pt_all}
maps.update({"IMPORTS": _pt_modules})
pytorch = SimpleNamespace(**maps)
__all__ += ["pytorch"]
_sk_modules = ["sklearn", "sk2onnx"]
if _isthere(*_sk_modules):
from .__sklearn import *
from .__sklearn import __all__ as _sk_all
maps = {f"{x}": globals()[x] for x in _sk_all}
maps.update({"IMPORTS": _sk_modules})
sklearn = SimpleNamespace(**maps)
__all__ += ["sklearn"]
_onnx_modules = ["onnx", "onnxruntime"]
if _isthere(*_onnx_modules):
maps = {"IMPORTS": _onnx_modules}
onnx = SimpleNamespace(**maps)
__all__ += ["onnx"]
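# Downstream usage sketch (assuming this file sits at nbox/framework/__init__.py
# as its repository path suggests): the conditional blocks above mean a
# namespace such as ``pytorch`` only exists when its backing libraries import
# cleanly, so guard access with hasattr:
#
#     from nbox import framework
#     if hasattr(framework, "pytorch"):
#         print(framework.pytorch.IMPORTS)  # -> ["torch"]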
avg_line_length: 40.16129 | max_line_length: 160 | alphanum_fraction: 0.685408
hexsha: dc9e185ca9f3550d54cd41d0a8182bf42957cefe | size: 7,451 | ext: py | lang: Python
max_stars_repo_path: homeassistant/components/screenlogic/climate.py | max_stars_repo_name: tizzen33/core | max_stars_repo_head_hexsha: 2a1884a1f7a07848b8b63afd29f59c81f1ffaf62 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 7 | max_stars_repo_stars_event_min_datetime: 2019-08-15T13:36:58.000Z | max_stars_repo_stars_event_max_datetime: 2020-03-18T10:46:29.000Z
max_issues_repo_path: homeassistant/components/screenlogic/climate.py | max_issues_repo_name: tizzen33/core | max_issues_repo_head_hexsha: 2a1884a1f7a07848b8b63afd29f59c81f1ffaf62 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 87 | max_issues_repo_issues_event_min_datetime: 2020-07-15T13:43:35.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-23T07:43:10.000Z
max_forks_repo_path: homeassistant/components/screenlogic/climate.py | max_forks_repo_name: tizzen33/core | max_forks_repo_head_hexsha: 2a1884a1f7a07848b8b63afd29f59c81f1ffaf62 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 7 | max_forks_repo_forks_event_min_datetime: 2018-10-04T10:12:45.000Z | max_forks_repo_forks_event_max_datetime: 2021-12-29T20:55:40.000Z
"""Support for a ScreenLogic heating device."""
import logging
from screenlogicpy.const import DATA as SL_DATA, EQUIPMENT, HEAT_MODE
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.restore_state import RestoreEntity
from . import ScreenlogicEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SUPPORTED_FEATURES = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORTED_MODES = [HVAC_MODE_OFF, HVAC_MODE_HEAT]
SUPPORTED_PRESETS = [
HEAT_MODE.SOLAR,
HEAT_MODE.SOLAR_PREFERRED,
HEAT_MODE.HEATER,
]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up entry."""
entities = []
coordinator = hass.data[DOMAIN][config_entry.entry_id]
for body in coordinator.data[SL_DATA.KEY_BODIES]:
entities.append(ScreenLogicClimate(coordinator, body))
async_add_entities(entities)
class ScreenLogicClimate(ScreenlogicEntity, ClimateEntity, RestoreEntity):
"""Represents a ScreenLogic climate entity."""
def __init__(self, coordinator, body):
"""Initialize a ScreenLogic climate entity."""
super().__init__(coordinator, body)
self._configured_heat_modes = []
# Is solar listed as available equipment?
if self.coordinator.data["config"]["equipment_flags"] & EQUIPMENT.FLAG_SOLAR:
self._configured_heat_modes.extend(
[HEAT_MODE.SOLAR, HEAT_MODE.SOLAR_PREFERRED]
)
self._configured_heat_modes.append(HEAT_MODE.HEATER)
self._last_preset = None
@property
def name(self) -> str:
"""Name of the heater."""
ent_name = self.body["heat_status"]["name"]
return f"{self.gateway_name} {ent_name}"
@property
def min_temp(self) -> float:
"""Minimum allowed temperature."""
return self.body["min_set_point"]["value"]
@property
def max_temp(self) -> float:
"""Maximum allowed temperature."""
return self.body["max_set_point"]["value"]
@property
def current_temperature(self) -> float:
"""Return water temperature."""
return self.body["last_temperature"]["value"]
@property
def target_temperature(self) -> float:
"""Target temperature."""
return self.body["heat_set_point"]["value"]
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
if self.config_data["is_celsius"]["value"] == 1:
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def hvac_mode(self) -> str:
"""Return the current hvac mode."""
if self.body["heat_mode"]["value"] > 0:
return HVAC_MODE_HEAT
return HVAC_MODE_OFF
@property
def hvac_modes(self):
"""Return th supported hvac modes."""
return SUPPORTED_MODES
@property
def hvac_action(self) -> str:
"""Return the current action of the heater."""
if self.body["heat_status"]["value"] > 0:
return CURRENT_HVAC_HEAT
if self.hvac_mode == HVAC_MODE_HEAT:
return CURRENT_HVAC_IDLE
return CURRENT_HVAC_OFF
@property
def preset_mode(self) -> str:
"""Return current/last preset mode."""
if self.hvac_mode == HVAC_MODE_OFF:
return HEAT_MODE.NAME_FOR_NUM[self._last_preset]
return HEAT_MODE.NAME_FOR_NUM[self.body["heat_mode"]["value"]]
@property
def preset_modes(self):
"""All available presets."""
return [
HEAT_MODE.NAME_FOR_NUM[mode_num] for mode_num in self._configured_heat_modes
]
@property
def supported_features(self):
"""Supported features of the heater."""
return SUPPORTED_FEATURES
async def async_set_temperature(self, **kwargs) -> None:
"""Change the setpoint of the heater."""
if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
raise ValueError(f"Expected attribute {ATTR_TEMPERATURE}")
async with self.coordinator.api_lock:
success = await self.hass.async_add_executor_job(
self.gateway.set_heat_temp, int(self._data_key), int(temperature)
)
if success:
await self.coordinator.async_request_refresh()
else:
raise HomeAssistantError(
f"Failed to set_temperature {temperature} on body {self.body['body_type']['value']}"
)
async def async_set_hvac_mode(self, hvac_mode) -> None:
"""Set the operation mode."""
if hvac_mode == HVAC_MODE_OFF:
mode = HEAT_MODE.OFF
else:
mode = HEAT_MODE.NUM_FOR_NAME[self.preset_mode]
async with self.coordinator.api_lock:
success = await self.hass.async_add_executor_job(
self.gateway.set_heat_mode, int(self._data_key), int(mode)
)
if success:
await self.coordinator.async_request_refresh()
else:
raise HomeAssistantError(
f"Failed to set_hvac_mode {mode} on body {self.body['body_type']['value']}"
)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set the preset mode."""
_LOGGER.debug("Setting last_preset to %s", HEAT_MODE.NUM_FOR_NAME[preset_mode])
self._last_preset = mode = HEAT_MODE.NUM_FOR_NAME[preset_mode]
if self.hvac_mode == HVAC_MODE_OFF:
return
async with self.coordinator.api_lock:
success = await self.hass.async_add_executor_job(
self.gateway.set_heat_mode, int(self._data_key), int(mode)
)
if success:
await self.coordinator.async_request_refresh()
else:
raise HomeAssistantError(
f"Failed to set_preset_mode {mode} on body {self.body['body_type']['value']}"
)
async def async_added_to_hass(self):
"""Run when entity is about to be added."""
await super().async_added_to_hass()
_LOGGER.debug("Startup last preset is %s", self._last_preset)
if self._last_preset is not None:
return
prev_state = await self.async_get_last_state()
if (
prev_state is not None
and prev_state.attributes.get(ATTR_PRESET_MODE) is not None
):
_LOGGER.debug(
"Startup setting last_preset to %s from prev_state",
HEAT_MODE.NUM_FOR_NAME[prev_state.attributes.get(ATTR_PRESET_MODE)],
)
self._last_preset = HEAT_MODE.NUM_FOR_NAME[
prev_state.attributes.get(ATTR_PRESET_MODE)
]
else:
_LOGGER.debug(
"Startup setting last_preset to default (%s)",
self._configured_heat_modes[0],
)
self._last_preset = self._configured_heat_modes[0]
@property
def body(self):
"""Shortcut to access body data."""
return self.coordinator.data[SL_DATA.KEY_BODIES][self._data_key]
avg_line_length: 33.714932 | max_line_length: 100 | alphanum_fraction: 0.643404
hexsha: 187ba4f53fe90367dc3e93593f506e59cdfcee16 | size: 78,980 | ext: py | lang: Python
max_stars_repo_path: runs/2016/dnn2016med_traps/traps9/src/dataset.py | max_stars_repo_name: rohit21122012/DCASE2013 | max_stars_repo_head_hexsha: 5360402265331d1c9032cf0fa94ef73f8c3c1047 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2016-10-19T06:26:50.000Z | max_stars_repo_stars_event_max_datetime: 2016-10-19T13:39:42.000Z
max_issues_repo_path: runs/2016/dnn2016med_traps/traps9/src/dataset.py | max_issues_repo_name: rohit21122012/DCASE2013 | max_issues_repo_head_hexsha: 5360402265331d1c9032cf0fa94ef73f8c3c1047 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: runs/2016/dnn2016med_traps/traps9/src/dataset.py | max_forks_repo_name: rohit21122012/DCASE2013 | max_forks_repo_head_hexsha: 5360402265331d1c9032cf0fa94ef73f8c3c1047 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2016-06-29T02:32:05.000Z | max_forks_repo_forks_event_max_datetime: 2017-08-05T08:15:11.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import locale
import os
import socket
import tarfile
import urllib2
import zipfile
import numpy
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from files import *
from general import *
from ui import *
class Dataset(object):
"""Dataset base class.
The specific dataset classes are inherited from this class, and only needed methods are reimplemented.
"""
def __init__(self, data_path='data', name='dataset'):
"""__init__ method.
Parameters
----------
data_path : str
Basepath where the dataset is stored.
(Default value='data')
"""
# Folder name for dataset
self.name = name
# Path to the dataset
self.local_path = os.path.join(data_path, self.name)
# Create the dataset path if does not exist
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
# Evaluation setup folder
self.evaluation_setup_folder = 'evaluation_setup'
# Path to the folder containing evaluation setup files
self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)
# Meta data file, csv-format
self.meta_filename = 'meta.txt'
# Path to meta data file
self.meta_file = os.path.join(self.local_path, self.meta_filename)
# Hash file to detect removed or added files
self.filelisthash_filename = 'filelist.hash'
# Number of evaluation folds
self.evaluation_folds = 1
# List containing dataset package items
# Define this in the inherited class.
# Format:
# {
# 'remote_package': download_url,
# 'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
# 'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
# }
self.package_list = []
# List of audio files
self.files = None
# List of meta data dict
self.meta_data = None
# Training meta data for folds
self.evaluation_data_train = {}
# Testing meta data for folds
self.evaluation_data_test = {}
# Recognized audio extensions
self.audio_extensions = {'wav', 'flac'}
# Info fields for dataset
self.authors = ''
self.name_remote = ''
self.url = ''
self.audio_source = ''
self.audio_type = ''
self.recording_device_model = ''
self.microphone_model = ''
@property
def audio_files(self):
"""Get all audio files in the dataset
Parameters
----------
Nothing
Returns
-------
filelist : list
File list with absolute paths
"""
if self.files is None:
self.files = []
for item in self.package_list:
path = item['local_audio_path']
if path:
l = os.listdir(path)
for f in l:
file_name, file_extension = os.path.splitext(f)
if file_extension[1:] in self.audio_extensions:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
@property
def audio_file_count(self):
"""Get number of audio files in dataset
Parameters
----------
Nothing
Returns
-------
filecount : int
Number of audio files
"""
return len(self.audio_files)
@property
def meta(self):
"""Get meta data for dataset. If not already read from disk, data is read and returned.
Parameters
----------
Nothing
Returns
-------
meta_data : list
List containing meta data as dict.
Raises
-------
IOError
meta file not found.
"""
if self.meta_data is None:
self.meta_data = []
meta_id = 0
if os.path.isfile(self.meta_file):
f = open(self.meta_file, 'rt')
try:
reader = csv.reader(f, delimiter='\t')
for row in reader:
if len(row) == 2:
# Scene meta
self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
elif len(row) == 4:
# Audio tagging meta
self.meta_data.append(
{'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
'tags': row[3].split(';')})
elif len(row) == 6:
# Event meta
self.meta_data.append({'file': row[0],
'scene_label': row[1].rstrip(),
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4].rstrip(),
'event_type': row[5].rstrip(),
'id': meta_id
})
meta_id += 1
finally:
f.close()
else:
raise IOError("Meta file not found [%s]" % self.meta_file)
return self.meta_data
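    # Illustrative meta.txt rows for the three branches above (columns are
    # tab-separated; file names and labels are made up):
    #   2 columns: audio/street01.wav  street
    #   4 columns: audio/home01.wav    home    cm    child speech;adult male speech
    #   6 columns: audio/office01.wav  office  12.5  14.0  door slam  m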
@property
def meta_count(self):
"""Number of meta data items.
Parameters
----------
Nothing
Returns
-------
meta_item_count : int
Meta data item count
"""
return len(self.meta)
@property
def fold_count(self):
"""Number of fold in the evaluation setup.
Parameters
----------
Nothing
Returns
-------
fold_count : int
Number of folds
"""
return self.evaluation_folds
@property
def scene_labels(self):
"""List of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of scene labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label'])
labels.sort()
return labels
@property
def scene_label_count(self):
"""Number of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
scene_label_count : int
Number of unique scene labels.
"""
return len(self.scene_labels)
@property
def event_labels(self):
"""List of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
@property
def event_label_count(self):
"""Number of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
event_label_count : int
Number of unique event labels
"""
return len(self.event_labels)
@property
def audio_tags(self):
"""List of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of audio tags in alphabetical order.
"""
tags = []
for item in self.meta:
if 'tags' in item:
for tag in item['tags']:
if tag and tag not in tags:
tags.append(tag)
tags.sort()
return tags
@property
def audio_tag_count(self):
"""Number of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
audio_tag_count : int
Number of unique audio tags
"""
return len(self.audio_tags)
def __getitem__(self, i):
"""Getting meta data item
Parameters
----------
i : int
item id
Returns
-------
meta_data : dict
Meta data item
"""
if i < len(self.meta):
return self.meta[i]
else:
return None
def __iter__(self):
"""Iterator for meta data items
Parameters
----------
Nothing
Returns
-------
Nothing
"""
i = 0
meta = self[i]
# yield window while it's valid
while meta is not None:
yield meta
# get next item
i += 1
meta = self[i]
@staticmethod
def print_bytes(num_bytes):
"""Output number of bytes according to locale and with IEC binary prefixes
Parameters
----------
num_bytes : int > 0 [scalar]
Bytes
Returns
-------
bytes : str
Human readable string
"""
KiB = 1024
MiB = KiB * KiB
GiB = KiB * MiB
TiB = KiB * GiB
PiB = KiB * TiB
EiB = KiB * PiB
ZiB = KiB * EiB
YiB = KiB * ZiB
locale.setlocale(locale.LC_ALL, '')
output = locale.format("%d", num_bytes, grouping=True) + ' bytes'
if num_bytes > YiB:
output += ' (%.4g YiB)' % (num_bytes / YiB)
elif num_bytes > ZiB:
output += ' (%.4g ZiB)' % (num_bytes / ZiB)
elif num_bytes > EiB:
output += ' (%.4g EiB)' % (num_bytes / EiB)
elif num_bytes > PiB:
output += ' (%.4g PiB)' % (num_bytes / PiB)
elif num_bytes > TiB:
output += ' (%.4g TiB)' % (num_bytes / TiB)
elif num_bytes > GiB:
output += ' (%.4g GiB)' % (num_bytes / GiB)
elif num_bytes > MiB:
output += ' (%.4g MiB)' % (num_bytes / MiB)
elif num_bytes > KiB:
output += ' (%.4g KiB)' % (num_bytes / KiB)
return output
def download(self):
"""Download dataset over the internet to the local path
Parameters
----------
Nothing
Returns
-------
Nothing
Raises
-------
IOError
Download failed.
"""
section_header('Download dataset')
for item in self.package_list:
try:
if item['remote_package'] and not os.path.isfile(item['local_package']):
data = None
req = urllib2.Request(item['remote_package'], data, {})
handle = urllib2.urlopen(req)
if "Content-Length" in handle.headers.items():
size = int(handle.info()["Content-Length"])
else:
size = None
actualSize = 0
blocksize = 64 * 1024
tmp_file = os.path.join(self.local_path, 'tmp_file')
fo = open(tmp_file, "wb")
terminate = False
while not terminate:
block = handle.read(blocksize)
actualSize += len(block)
if size:
progress(title_text=os.path.split(item['local_package'])[1],
percentage=actualSize / float(size),
note=self.print_bytes(actualSize))
else:
progress(title_text=os.path.split(item['local_package'])[1],
note=self.print_bytes(actualSize))
if len(block) == 0:
break
fo.write(block)
fo.close()
os.rename(tmp_file, item['local_package'])
            except (urllib2.URLError, socket.timeout), e:
                try:
                    fo.close()
                except:
                    pass
                raise IOError('Download failed [%s]' % (item['remote_package']))
foot()
def extract(self):
"""Extract the dataset packages
Parameters
----------
Nothing
Returns
-------
Nothing
"""
section_header('Extract dataset')
for item_id, item in enumerate(self.package_list):
if item['local_package']:
if item['local_package'].endswith('.zip'):
with zipfile.ZipFile(item['local_package'], "r") as z:
# Trick to omit first level folder
parts = []
for name in z.namelist():
if not name.endswith('/'):
parts.append(name.split('/')[:-1])
prefix = os.path.commonprefix(parts) or ''
if prefix:
if len(prefix) > 1:
prefix_ = list()
prefix_.append(prefix[0])
prefix = prefix_
prefix = '/'.join(prefix) + '/'
offset = len(prefix)
# Start extraction
members = z.infolist()
file_count = 1
for i, member in enumerate(members):
if len(member.filename) > offset:
member.filename = member.filename[offset:]
if not os.path.isfile(os.path.join(self.local_path, member.filename)):
z.extract(member, self.local_path)
progress(
title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']',
percentage=(file_count / float(len(members))),
note=member.filename)
file_count += 1
elif item['local_package'].endswith('.tar.gz'):
tar = tarfile.open(item['local_package'], "r:gz")
for i, tar_info in enumerate(tar):
if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
tar.extract(tar_info, self.local_path)
progress(title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']',
note=tar_info.name)
tar.members = []
tar.close()
foot()
def on_after_extract(self):
"""Dataset meta data preparation, this will be overloaded in dataset specific classes
Parameters
----------
Nothing
Returns
-------
Nothing
"""
pass
def get_filelist(self):
"""List of files under local_path
Parameters
----------
Nothing
Returns
-------
filelist: list
File list
"""
filelist = []
for path, subdirs, files in os.walk(self.local_path):
for name in files:
filelist.append(os.path.join(path, name))
return filelist
def check_filelist(self):
"""Generates hash from file list and check does it matches with one saved in filelist.hash.
If some files have been deleted or added, checking will result False.
Parameters
----------
Nothing
Returns
-------
result: bool
Result
"""
if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)):
hash = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0]
if hash != get_parameter_hash(sorted(self.get_filelist())):
return False
else:
return True
else:
return False
def save_filelist_hash(self):
"""Generates file list hash, and saves it as filelist.hash under local_path.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
filelist = self.get_filelist()
filelist_hash_not_found = True
for file in filelist:
if self.filelisthash_filename in file:
filelist_hash_not_found = False
if filelist_hash_not_found:
filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist)))
def fetch(self):
"""Download, extract and prepare the dataset.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
if not self.check_filelist():
self.download()
self.extract()
self.on_after_extract()
self.save_filelist_hash()
return self
def train(self, fold=0):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 2:
# Scene meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1]
})
elif len(row) == 4:
# Audio tagging meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'tag_string': row[2],
'tags': row[3].split(';')
})
elif len(row) == 5:
# Event meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
else:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label']
})
self.evaluation_data_train[0] = data
return self.evaluation_data_train[fold]
def test(self, fold=0):
"""List of testing items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[fold] = data
return self.evaluation_data_test[fold]
def folds(self, mode='folds'):
"""List of fold ids
Parameters
----------
mode : str {'folds','full'}
Fold setup type, possible values are 'folds' and 'full'. In 'full' mode fold number is set 0 and all data is used for training.
(Default value=folds)
Returns
-------
list : list of integers
Fold ids
"""
if mode == 'folds':
return range(1, self.evaluation_folds + 1)
elif mode == 'full':
return [0]
def file_meta(self, file):
"""Meta data for given file
Parameters
----------
file : str
File name
Returns
-------
list : list of dicts
List containing all meta data related to given file.
"""
file = self.absolute_to_relative(file)
file_meta = []
for item in self.meta:
if item['file'] == file:
file_meta.append(item)
return file_meta
def relative_to_absolute_path(self, path):
"""Converts relative path into absolute path.
Parameters
----------
path : str
Relative path
Returns
-------
path : str
Absolute path
"""
return os.path.abspath(os.path.join(self.local_path, path))
def absolute_to_relative(self, path):
"""Converts absolute path into relative path.
Parameters
----------
path : str
Absolute path
Returns
-------
path : str
Relative path
"""
if path.startswith(os.path.abspath(self.local_path)):
return os.path.relpath(path, self.local_path)
else:
return path
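# Typical end-to-end sketch for the Dataset subclasses defined below (the data
# path and the chosen subclass are illustrative only):
#
#     dataset = TUTAcousticScenes_2016_DevelopmentSet(data_path='data').fetch()
#     for fold in dataset.folds(mode='folds'):
#         train_items = dataset.train(fold)  # dicts with 'file', 'scene_label', ...
#         test_items = dataset.test(fold)    # dicts with 'file'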
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
self.url = 'https://zenodo.org/record/45739'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
meta_data = {}
for fold in xrange(1, self.evaluation_folds):
# Read train files in
train_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')
f = open(train_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
# Read evaluation files in
eval_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt')
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
section_header('Generating meta file for dataset')
meta_data = {}
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
def train(self, fold=0):
raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, development dataset'
self.url = 'https://zenodo.org/record/45759'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'),
base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_train[fold]:
self.evaluation_data_train[fold][scene_label_] = []
if fold > 0:
with open(
os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_train.txt'),
'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 5:
# Event meta
self.evaluation_data_train[fold][scene_label_].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if item['scene_label'] == scene_label_:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
self.evaluation_data_train[0][scene_label_] = data
if scene_label:
return self.evaluation_data_train[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_train[fold][scene_label_]:
data.append(item)
return data
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(
os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'),
'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append(
{'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
                            if item['scene_label'] == scene_label_:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
]
@property
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'] not in labels:
labels.append(item['event_label'])
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path, 'meta')):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'),
base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
raise IOError('Train setup not available.')
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
                        with open(os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'),
'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append(
{'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.audio_files:
if scene_label_ in item:
if self.relative_to_absolute_path(item) not in files:
data.append({'file': self.relative_to_absolute_path(item)})
files.append(self.relative_to_absolute_path(item))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
def __init__(self, data_path=None):
Dataset.__init__(self, data_path=data_path, name='CHiMeHome-audiotag-development')
self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
self.url = ''
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Unknown'
self.evaluation_folds = 10
self.package_list = [
{
'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
},
]
@property
def audio_files(self):
"""Get all audio files in the dataset, use only file from CHime-Home-refined set.
Parameters
----------
nothing
Returns
-------
files : list
audio files
"""
if self.files is None:
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(row[1])
self.files = []
for file in self.package_list:
path = file['local_audio_path']
if path:
l = os.listdir(path)
p = path.replace(self.local_path + os.path.sep, '')
for f in l:
fileName, fileExtension = os.path.splitext(f)
if fileExtension[1:] in self.audio_extensions and fileName in refined_files:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
def read_chunk_meta(self, meta_filename):
if os.path.isfile(meta_filename):
meta_file_handle = open(meta_filename, 'rt')
try:
meta_file_reader = csv.reader(meta_file_handle, delimiter=',')
data = {}
for meta_file_row in meta_file_reader:
data[meta_file_row[0]] = meta_file_row[1]
finally:
meta_file_handle.close()
return data
def tagcode_to_taglabel(self, tag):
map = {'c': 'child speech',
'm': 'adult male speech',
'f': 'adult female speech',
'v': 'video game/tv',
'p': 'percussive sound',
'b': 'broadband noise',
'o': 'other',
'S': 'silence/background',
'U': 'unidentifiable'
}
if tag in map:
return map[tag]
else:
return None
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Legacy dataset meta files are converted to be compatible with current scheme.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
scene_label = 'home'
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(raw_path, base_filename + '.csv')
meta_data = self.read_chunk_meta(annotation_filename)
tags = []
for i, tag in enumerate(meta_data['majorityvote']):
                        if tag == 'b':
                            print file
                        if tag != 'S' and tag != 'U':
tags.append(self.tagcode_to_taglabel(tag))
tags = ';'.join(tags)
writer.writerow(
(os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
for target_tag in self.audio_tags:
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_test.txt')):
all_folds_found = False
if not all_folds_found:
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
numpy.random.seed(475686)
kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(
self.relative_to_absolute_path(os.path.join('chime_home', 'chunks', row[1] + '.wav')))
fold = 1
files = numpy.array(refined_files)
for train_index, test_index in kf:
train_files = files[train_index]
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow(
[os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'],
';'.join(item['tags'])])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow(
[os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'],
';'.join(item['tags'])])
fold += 1
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
"""DCASE 2013 Acoustic scene classification, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
"""DCASE 2013 Acoustic scene classification, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file) or 1:
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
"""DCASE 2013 Sound event detection, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
},
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
# 'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
# 'local_audio_path': None,
# },
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
# 'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
# 'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
# },
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('singlesounds_stereo') != -1:
annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
label = base_filename[:-2]
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1], label, 'i'))
finally:
annotation_file_handle.close()
elif file.find('events_OS_development_v2') != -1:
annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
# Construct training and testing sets. Isolated sounds are used for training and
# polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
class DCASE2013_Event_EvaluationSet(Dataset):
"""DCASE 2013 Sound event detection, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('dcase2013_event_detection_testset_OS') != -1:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
else:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',
base_filename + '.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
for fold in xrange(1, self.evaluation_folds):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
# Construct training and testing sets. Isolated sounds are used for training and
# polyphonic mixtures are used for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
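# Hedged usage sketch, not part of the original dataset module: the fold files written by
# on_after_extract() above are plain tab-separated text, so they can be read back with the
# csv module. The helper below only assumes the column layout produced above; the path
# argument is whatever fold file the caller points it at.
def _read_event_fold_file(fold_path):
    import csv
    rows = []
    with open(fold_path, 'rt') as fold_file:
        for row in csv.reader(fold_file, delimiter='\t'):
            if len(row) == 1:
                # test list: audio file path only
                rows.append({'file': row[0]})
            else:
                # train/evaluate list: file, scene label, onset, offset, event label
                rows.append({'file': row[0],
                             'scene_label': row[1],
                             'event_onset': float(row[2]),
                             'event_offset': float(row[3]),
                             'event_label': row[4]})
    return rows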
| 39.688442
| 143
| 0.518005
|
139e590525475657c335071ae28d9f2bf096dfb2
| 463
|
py
|
Python
|
mdv/tests/files/manual/run_all.py
|
guye1296/terminal_markdown_viewer
|
80f333ba51dc2f1dfa854e203d3374a112aecbd3
|
[
"BSD-3-Clause"
] | 1,745
|
2015-07-14T09:35:20.000Z
|
2022-03-31T15:53:06.000Z
|
mdv/tests/files/manual/run_all.py
|
guye1296/terminal_markdown_viewer
|
80f333ba51dc2f1dfa854e203d3374a112aecbd3
|
[
"BSD-3-Clause"
] | 91
|
2015-07-15T13:44:31.000Z
|
2022-02-06T14:18:08.000Z
|
mdv/tests/files/manual/run_all.py
|
guye1296/terminal_markdown_viewer
|
80f333ba51dc2f1dfa854e203d3374a112aecbd3
|
[
"BSD-3-Clause"
] | 140
|
2015-07-14T10:11:50.000Z
|
2022-03-19T22:46:57.000Z
|
#!/usr/bin/env python2.7
print 'Please inspect visually.'
import os, sys
sys.argv.append('')
# substring match on filenames, if not given run all:
match = sys.argv[1]
exe = '../../mdv/markdownviewer.py'
for file in os.listdir('.'):
if not file.endswith('.md') or match not in file:
continue
print '\n\n'
os.system('echo "----\nTESTFILE:\n# %s\n-----" | %s -' % (file, exe))
print open(file).read()
os.system('%s "%s"' % (exe, file))
| 27.235294
| 73
| 0.598272
|
550b20bd2ed32b1b48be31c5bad7ac1c883ca89d
| 3,825
|
py
|
Python
|
google/appengine/_internal/django/core/files/base.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/google/appengine/_internal/django/core/files/base.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/google/appengine/_internal/django/core/files/base.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from google.appengine._internal.django.utils.encoding import smart_str, smart_unicode
from google.appengine._internal.django.core.files.utils import FileProxyMixin
class File(FileProxyMixin):
DEFAULT_CHUNK_SIZE = 64 * 2**10
def __init__(self, file, name=None):
self.file = file
if name is None:
name = getattr(file, 'name', None)
self.name = name
self.mode = getattr(file, 'mode', None)
def __str__(self):
return smart_str(self.name or '')
def __unicode__(self):
return smart_unicode(self.name or u'')
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self or "None")
def __nonzero__(self):
return bool(self.name)
def __len__(self):
return self.size
def _get_size(self):
if not hasattr(self, '_size'):
if hasattr(self.file, 'size'):
self._size = self.file.size
elif os.path.exists(self.file.name):
self._size = os.path.getsize(self.file.name)
else:
raise AttributeError("Unable to determine the file's size.")
return self._size
def _set_size(self, size):
self._size = size
size = property(_get_size, _set_size)
def _get_closed(self):
return not self.file or self.file.closed
closed = property(_get_closed)
def chunks(self, chunk_size=None):
"""
Read the file and yield chunks of ``chunk_size`` bytes (defaults to
``UploadedFile.DEFAULT_CHUNK_SIZE``).
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
if hasattr(self, 'seek'):
self.seek(0)
# Assume the pointer is at zero...
counter = self.size
while counter > 0:
yield self.read(chunk_size)
counter -= chunk_size
def multiple_chunks(self, chunk_size=None):
"""
Returns ``True`` if you can expect multiple chunks.
NB: If a particular file representation is in memory, subclasses should
always return ``False`` -- there's no good reason to read from memory in
chunks.
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
return self.size > chunk_size
def __iter__(self):
# Iterate over this file-like object by newlines
buffer_ = None
for chunk in self.chunks():
chunk_buffer = StringIO(chunk)
for line in chunk_buffer:
if buffer_:
line = buffer_ + line
buffer_ = None
# If this is the end of a line, yield
# otherwise, wait for the next round
if line[-1] in ('\n', '\r'):
yield line
else:
buffer_ = line
if buffer_ is not None:
yield buffer_
def open(self, mode=None):
if not self.closed:
self.seek(0)
elif self.name and os.path.exists(self.name):
self.file = open(self.name, mode or self.mode)
else:
raise ValueError("The file cannot be reopened.")
def close(self):
self.file.close()
class ContentFile(File):
"""
A File-like object that takes just raw content, rather than an actual file.
"""
def __init__(self, content):
content = content or ''
super(ContentFile, self).__init__(StringIO(content))
self.size = len(content)
def __str__(self):
return 'Raw content'
def __nonzero__(self):
return True
def open(self, mode=None):
self.seek(0)
def close(self):
pass
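# Hedged usage sketch, not part of the upstream module: ContentFile wraps raw text in a
# File-like object, so chunks() and line iteration work without touching the filesystem.
# The sample text below is an arbitrary illustration.
def _content_file_demo():
    demo = ContentFile('first line\nsecond line\n')
    assert demo.size == len('first line\nsecond line\n')
    assert not demo.multiple_chunks()  # content is far smaller than DEFAULT_CHUNK_SIZE
    return [line for line in demo]     # ['first line\n', 'second line\n']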
| 28.333333
| 85
| 0.579869
|
b214e8e4c22725a924739c28e7398529f668608d
| 1,894
|
py
|
Python
|
ppq/samples/quantize_dsp.py
|
openppl-public/ppq
|
0fdea7d4982bc57feb6bb8548c7f012707fbd607
|
[
"Apache-2.0"
] | 100
|
2021-12-31T09:34:06.000Z
|
2022-03-25T02:54:51.000Z
|
ppq/samples/quantize_dsp.py
|
openppl-public/ppq
|
0fdea7d4982bc57feb6bb8548c7f012707fbd607
|
[
"Apache-2.0"
] | 12
|
2021-12-31T10:28:15.000Z
|
2022-03-31T07:08:44.000Z
|
ppq/samples/quantize_dsp.py
|
openppl-public/ppq
|
0fdea7d4982bc57feb6bb8548c7f012707fbd607
|
[
"Apache-2.0"
] | 21
|
2021-12-31T09:51:02.000Z
|
2022-03-30T12:21:55.000Z
|
from typing import Iterable
import torch
import torchvision
from ppq import BaseGraph, QuantizationSettingFactory, TargetPlatform
from ppq.api import export_ppq_graph, quantize_torch_model
from torch.utils.data import DataLoader
BATCHSIZE = 32
INPUT_SHAPE = [3, 224, 224]
DEVICE = 'cuda' # only cuda is fully tested :( For other execution devices there might be bugs.
PLATFORM = TargetPlatform.PPL_DSP_INT8 # identify a target platform for your network.
def load_calibration_dataset() -> Iterable:
return [torch.rand(size=INPUT_SHAPE) for _ in range(32)]
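# Hedged alternative sketch, not part of the original sample: in practice calibration data
# usually comes from real images rather than random tensors. The folder name and transform
# below are illustrative assumptions.
def load_calibration_dataset_from_folder(folder: str = 'calibration_images') -> Iterable:
    from torchvision import datasets, transforms
    transform = transforms.Compose([
        transforms.Resize((INPUT_SHAPE[1], INPUT_SHAPE[2])),
        transforms.ToTensor(),
    ])
    return [sample for sample, _ in datasets.ImageFolder(folder, transform=transform)]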
def collate_fn(batch: torch.Tensor) -> torch.Tensor:
return batch.to(DEVICE)
# Load a pretrained mobilenet v2 model
model = torchvision.models.mobilenet.mobilenet_v2(pretrained=True)
model = model.to(DEVICE)
# create a setting for quantizing your network with PPL CUDA.
quant_setting = QuantizationSettingFactory.pplcuda_setting()
quant_setting.equalization = True # use layerwise equalization algorithm.
quant_setting.dispatcher = 'conservative' # dispatch this network in a conservative way.
# Load training data for creating a calibration dataloader.
calibration_dataset = load_calibration_dataset()
calibration_dataloader = DataLoader(
dataset=calibration_dataset,
batch_size=BATCHSIZE, shuffle=True)
# quantize your model.
quantized = quantize_torch_model(
model=model, calib_dataloader=calibration_dataloader,
calib_steps=32, input_shape=[BATCHSIZE] + INPUT_SHAPE,
setting=quant_setting, collate_fn=collate_fn, platform=PLATFORM,
onnx_export_file='Output/onnx.model', device=DEVICE, verbose=0)
# Quantization Result is a PPQ BaseGraph instance.
assert isinstance(quantized, BaseGraph)
# export quantized graph.
export_ppq_graph(graph=quantized, platform=PLATFORM,
graph_save_to='Output/quantized(onnx).onnx',
config_save_to='Output/quantized(onnx).json')
| 38.653061
| 95
| 0.786167
|
2bdf58b94d41d4986bfc9bd1faad2d7817695c3f
| 11,278
|
py
|
Python
|
repos.py
|
fisadev/student-repos-handler
|
da45f29bce844e221ba5c509189d7fa10296775e
|
[
"MIT"
] | null | null | null |
repos.py
|
fisadev/student-repos-handler
|
da45f29bce844e221ba5c509189d7fa10296775e
|
[
"MIT"
] | null | null | null |
repos.py
|
fisadev/student-repos-handler
|
da45f29bce844e221ba5c509189d7fa10296775e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
import sys
from os import path, system, listdir, putenv
from collections import namedtuple
import requests
from termcolor import colored
Repo = namedtuple('Repo', 'vcs slug description')
class Repo(object):
def __init__(self, alias, vcs, features, service, slug, server, description):
self.alias = alias
self.vcs = vcs
self.features = features
self.service = service
self.slug = slug
self.server = server
self.description = description
def clone_url(self, section):
if self.service == 'bitbucket':
if self.vcs == 'hg':
base_url = 'https://bitbucket.org/%s' % self.slug
elif self.vcs == 'git':
base_url = 'git@bitbucket.org:%s.git' % self.slug
elif self.service == 'github':
if self.vcs != 'git':
raise NotImplementedError('Github only supports git')
else:
base_url = 'git@github.com:%s.git' % self.slug
elif self.service == 'gitlab':
if self.vcs != 'git':
raise NotImplementedError('Gitlab only supports git')
else:
base_url = 'git@gitlab.com:%s.git' % self.slug
else:
raise NotImplementedError('Currently %s is not supported as a service' % self.service)
if section == 'code':
return base_url
elif section == 'wiki':
if self.service == 'bitbucket':
return base_url + '/wiki'
elif self.service in ('github', 'gitlab'):
return base_url.replace('.git', '.wiki.git')
else:
raise NotImplementedError('Unknown section %s' % section)
def web_url(self, section=None):
if self.service == 'bitbucket':
url = 'https://bitbucket.org/%s' % self.slug
elif self.service == 'github':
url = 'https://github.com/%s' % self.slug
elif self.service == 'gitlab':
url = 'https://gitlab.com/%s' % self.slug
else:
raise NotImplementedError('Currently %s is not supported as a service' % self.service)
if section == 'wiki':
url += '/wiki'
return url
def path(self, section, repos_root):
base_path = path.join(repos_root, self.alias)
if section == 'code':
return base_path
elif section == 'wiki':
return base_path + '-wiki'
else:
raise NotImplementedError('Unknown section %s' % section)
def __str__(self):
return self.alias
def long_description(self):
return '%s: %s (%s at %s, %s, %s)' % (self.alias, self.description,
self.slug, self.service,
self.vcs,
'-'.join(self.features))
class ReposHandler(object):
def __init__(self, repos, repos_root):
self.repos = repos
self.repos_root = repos_root
def filter_repos(self, filters):
if not filters:
filtered = self.repos
else:
filters = [f.lower() for f in filters]
filtered = [repo for repo in self.repos
if any(f in repo.long_description().lower()
for f in filters)]
if filtered:
print(colored('%i repos found' % len(filtered), 'green'))
else:
print(colored('No repos matching the filters', 'red'))
return filtered
def iterate_filtered_repos(self, filters):
repos = self.filter_repos(filters)
for repo in repos:
print(colored('-- %s --' % repo, 'green'))
yield repo
def vcs_action_on_repos(self, filters, vcs_action):
for repo in self.iterate_filtered_repos(filters):
if 'code' in repo.features:
print(colored(' -- Code --', 'green'))
vcs_action(repo, 'code')
if 'wiki' in repo.features:
print(colored(' -- Wiki --', 'green'))
vcs_action(repo, 'wiki')
def update(self, *filters):
self.vcs_action_on_repos(filters, self.update_vcs)
def clean(self, *filters):
self.vcs_action_on_repos(filters, self.clean_vcs)
def status(self, *filters):
self.vcs_action_on_repos(filters, self.status_vcs)
def update_vcs(self, repo, section):
repo_url = repo.clone_url(section)
repo_path = repo.path(section, self.repos_root)
if path.exists(repo_path):
if repo.vcs == 'hg':
pull_command = 'hg pull -u'
elif repo.vcs == 'git':
pull_command = 'git pull'
command = '(cd %s && %s)' % (repo_path, pull_command)
else:
if repo.vcs == 'hg':
clone_command = 'hg clone'
elif repo.vcs == 'git':
clone_command = 'git clone'
command = '%s %s %s' % (clone_command, repo_url, repo_path)
result = system(command)
if result != 0:
print(colored('Error running command', 'red'))
def clean_vcs(self, repo, section):
repo_path = repo.path(section, self.repos_root)
if repo.vcs == 'hg':
clean_command = 'hg revert --all --no-backup'
elif repo.vcs == 'git':
clean_command = 'git checkout -- .'
command = '(cd %s && %s)' % (repo_path, clean_command)
result = system(command)
if result != 0:
print(colored('Error running command', 'red'))
def status_vcs(self, repo, section):
repo_path = repo.path(section, self.repos_root)
if repo.vcs == 'hg':
clean_command = 'hg status'
elif repo.vcs == 'git':
clean_command = 'git status'
command = '(cd %s && %s)' % (repo_path, clean_command)
result = system(command)
if result != 0:
print(colored('Error running command', 'red'))
def code(self, editor, file_, *filters):
return self.open_vcs_file('code', editor, filters, file_, any_extension=False)
def wiki(self, editor, file_, *filters):
return self.open_vcs_file('wiki', editor, filters, file_, any_extension=True)
def wiki_web(self, browser, url, *filters):
for repo in self.iterate_filtered_repos(filters):
full_url = '%s/%s' % (repo.web_url('wiki'), url)
system('%s %s' % (browser, full_url))
def server(self, browser, *filters):
for repo in self.iterate_filtered_repos(filters):
system('%s %s' % (browser, repo.server))
def revive_server(self, *filters):
for repo in self.iterate_filtered_repos(filters):
print('Accessing server...')
response = requests.get(repo.server)
print('Response code:', response.status_code)
def run(self, command, *filters):
for repo in self.iterate_filtered_repos(filters):
repo_path = repo.path('code', self.repos_root)
putenv("REPO_PATH", repo_path)
result = system('(cd %s && %s)' % (repo_path, command))
if result != 0:
print(colored('Error running command', 'red'))
print()
def open_vcs_file(self, section, editor, filters, file_, any_extension=False):
for repo in self.iterate_filtered_repos(filters):
file_path = path.join(repo.path(section, self.repos_root), file_)
possible_files = []
if any_extension:
directory = path.dirname(file_path)
if path.exists(directory):
possible_files = [path.join(directory, file_name)
for file_name in listdir(directory)
if file_name.split('.')[0] == file_]
else:
if path.exists(file_path):
possible_files = [file_path,]
if not possible_files:
print(colored('File does not exist', 'red'))
elif len(possible_files) > 1:
print(colored('Many files on the wiki with that name:', 'red'))
print('\n'.join(possible_files))
else:
system('%s %s' % (editor, possible_files[0]))
def list(self, *filters):
repos = self.filter_repos(filters)
for repo in repos:
print(repo.long_description())
def show_urls(self, *filters):
for repo in self.iterate_filtered_repos(filters):
print(repo.long_description())
print(repo.web_url())
print(repo.web_url('wiki'))
print(repo.server)
@classmethod
def find_repos_config(cls, start_path):
current_path = start_path
while current_path:
config_path = path.join(current_path, 'repos.config')
if path.exists(config_path):
return config_path
else:
if current_path == '/':
current_path = None
else:
current_path = path.dirname(current_path)
@classmethod
def read_repos_from_file(cls, file_path):
repos = []
with open(file_path) as repos_file:
for line in repos_file.read().strip().split('\n'):
if not line.startswith('#'):
data = line.split('|')
alias, vcs, features, service, slug, server, description = data
features = features.split(',')
repo = Repo(alias=alias,
vcs=vcs,
features=features,
service=service,
slug=slug,
server=server,
description=description)
repos.append(repo)
if len(repos) != len(set(repo.alias for repo in repos)):
raise ValueError('There are repos with the same alias')
return repos
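# Hedged illustration, not part of the original script: ReposHandler.read_repos_from_file()
# expects one pipe-separated line per repo, with fields in this order:
# alias|vcs|features|service|slug|server|description. The values below are made-up
# placeholders, not real repositories.
_SAMPLE_CONFIG_LINE = ('student1|git|code,wiki|github|someorg/student1-repo|'
                       'https://student1.example.com|First student project')
def _parse_sample_config_line(line=_SAMPLE_CONFIG_LINE):
    alias, vcs, features, service, slug, server, description = line.split('|')
    return Repo(alias=alias, vcs=vcs, features=features.split(','), service=service,
                slug=slug, server=server, description=description)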
def main():
current_path = path.abspath('.')
config_path = ReposHandler.find_repos_config(current_path)
if not config_path:
print(colored('Unable to find repos.config', 'red'))
sys.exit(1)
handler = ReposHandler(ReposHandler.read_repos_from_file(config_path), current_path)
if len(sys.argv) < 2:
print('Usage:')
print('repos list FILTERS')
print('repos status FILTERS')
print('repos clean FILTERS')
print('repos update FILTERS')
print('repos code EDITOR FILE FILTERS')
print('repos wiki EDITOR FILE FILTERS')
print('repos wiki_web BROWSER URL FILTERS')
print('repos server BROWSER FILTERS')
print('repos revive_server BROWSER FILTERS')
print('repos run COMMAND FILTERS')
print('repos show_urls FILTERS')
exit()
action = sys.argv[1]
method = getattr(handler, action)
if method:
try:
method(*sys.argv[2:])
except KeyboardInterrupt:
print("Cancelled")
if __name__ == '__main__':
main()
| 35.354232
| 95
| 0.546019
|
a26e77c2db38e053bb17af33ac639ae747124d2f
| 8,765
|
py
|
Python
|
TLCS/generator.py
|
BridgeQZH/Deep-QLearning-Agent-for-Traffic-Signal-Control
|
194924e51d816daa992faa4bd8cdbfd22dc7b3e8
|
[
"MIT"
] | null | null | null |
TLCS/generator.py
|
BridgeQZH/Deep-QLearning-Agent-for-Traffic-Signal-Control
|
194924e51d816daa992faa4bd8cdbfd22dc7b3e8
|
[
"MIT"
] | null | null | null |
TLCS/generator.py
|
BridgeQZH/Deep-QLearning-Agent-for-Traffic-Signal-Control
|
194924e51d816daa992faa4bd8cdbfd22dc7b3e8
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
class TrafficGenerator:
def __init__(self, max_steps, n_cars_generated):
self._n_cars_generated = n_cars_generated # how many cars per episode
self._max_steps = max_steps
def generate_routefile(self, seed):
"""
Generation of the route of every car for one episode
"""
np.random.seed(seed) # make tests reproducible
# the generation of cars is distributed according to a weibull distribution
timings = np.random.weibull(2, self._n_cars_generated)
timings = np.sort(timings) # Return a sorted copy of an array
# reshape the distribution to fit the interval 0:max_steps
car_gen_steps = []
min_old = math.floor(timings[1])
max_old = math.ceil(timings[-1])
min_new = 0
max_new = self._max_steps
for value in timings:
car_gen_steps = np.append(car_gen_steps, ((max_new - min_new) / (max_old - min_old)) * (value - max_old) + max_new)
car_gen_steps = np.rint(car_gen_steps) # round every value to int -> effective steps when a car will be generated
# produce the file for cars generation, one car per line
with open("intersection/episode_routes.rou.xml", "w") as routes:
print("""<routes>
<vType accel="1.0" decel="4.5" id="standard_car" length="5.0" minGap="2.5" maxSpeed="25" sigma="0.5" />
<route id="W_N" edges="W2TL TL2N"/>
<route id="W_E" edges="W2TL TL2E"/>
<route id="W_S" edges="W2TL TL2S"/>
<route id="N_W" edges="N2TL TL2W"/>
<route id="N_E" edges="N2TL TL2E"/>
<route id="N_S" edges="N2TL TL2S"/>
<route id="E_W" edges="E2TL TL2W"/>
<route id="E_N" edges="E2TL TL2N"/>
<route id="E_S" edges="E2TL TL2S"/>
<route id="S_W" edges="S2TL TL2W"/>
<route id="S_N" edges="S2TL TL2N"/>
<route id="S_E" edges="S2TL TL2E"/>""", file=routes)
for car_counter, step in enumerate(car_gen_steps): # adds a counter to an iterable and returns it in a form of enumerating object
straight_or_turn = np.random.uniform()
car_type = np.random.randint(1,11)
if car_type <= 11:
if straight_or_turn < 0.75: # choose direction: straight or turn - 75% of the time the car goes straight
route_straight = np.random.randint(1, 7) # choose a random source & destination
if route_straight == 1 or route_straight == 2:
print(' <vehicle id="W_E_%i" type="standard_car" route="W_E" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_straight == 3 or route_straight == 4:
print(' <vehicle id="E_W_%i" type="standard_car" route="E_W" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_straight == 5:
print(' <vehicle id="N_S_%i" type="standard_car" route="N_S" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
else:
print(' <vehicle id="S_N_%i" type="standard_car" route="S_N" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
else: # car that turns - 25% of the time the car turns
route_turn = np.random.randint(1, 13) # choose a random source & destination
if route_turn == 1 or route_turn == 2:
print(' <vehicle id="W_N_%i" type="standard_car" route="W_N" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 3 or route_turn == 4:
print(' <vehicle id="W_S_%i" type="standard_car" route="W_S" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 5:
print(' <vehicle id="N_W_%i" type="standard_car" route="N_W" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 6:
print(' <vehicle id="N_E_%i" type="standard_car" route="N_E" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 7 or route_turn == 8:
print(' <vehicle id="E_N_%i" type="standard_car" route="E_N" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 9 or route_turn == 10:
print(' <vehicle id="E_S_%i" type="standard_car" route="E_S" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 11:
print(' <vehicle id="S_W_%i" type="standard_car" route="S_W" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 12:
print(' <vehicle id="S_E_%i" type="standard_car" route="S_E" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
else:
if straight_or_turn < 0.75: # choose direction: straight or turn - 75% of the time the car goes straight
route_straight = np.random.randint(1, 5) # choose a random source & destination
if route_straight == 1:
print(' <vehicle id="W_E_%i" type="slow_start_car" route="W_E" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_straight == 2:
print(' <vehicle id="E_W_%i" type="slow_start_car" route="E_W" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_straight == 3:
print(' <vehicle id="N_S_%i" type="slow_start_car" route="N_S" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
else:
print(' <vehicle id="S_N_%i" type="slow_start_car" route="S_N" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
else: # car that turns - 25% of the time the car turns
route_turn = np.random.randint(1, 9) # choose a random source & destination
if route_turn == 1:
print(' <vehicle id="W_N_%i" type="slow_start_car" route="W_N" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 2:
print(' <vehicle id="W_S_%i" type="slow_start_car" route="W_S" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 3:
print(' <vehicle id="N_W_%i" type="slow_start_car" route="N_W" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 4:
print(' <vehicle id="N_E_%i" type="slow_start_car" route="N_E" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 5:
print(' <vehicle id="E_N_%i" type="slow_start_car" route="E_N" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 6:
print(' <vehicle id="E_S_%i" type="slow_start_car" route="E_S" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 7:
print(' <vehicle id="S_W_%i" type="slow_start_car" route="S_W" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
elif route_turn == 8:
print(' <vehicle id="S_E_%i" type="slow_start_car" route="S_E" depart="%s" departLane="random" departSpeed="10" />' % (car_counter, step), file=routes)
print("</routes>", file=routes)
| 79.681818
| 182
| 0.552767
|
c71fcd717893db02087c8fdcbd67f460ebd87753
| 2,254
|
py
|
Python
|
underline_macro.py
|
Nateowami/libreoffice-underline-macro
|
2dc3178a719ee9a80929ba067f377cff3a966536
|
[
"MIT"
] | 1
|
2021-04-19T13:26:39.000Z
|
2021-04-19T13:26:39.000Z
|
underline_macro.py
|
Nateowami/libreoffice-underline-macro
|
2dc3178a719ee9a80929ba067f377cff3a966536
|
[
"MIT"
] | null | null | null |
underline_macro.py
|
Nateowami/libreoffice-underline-macro
|
2dc3178a719ee9a80929ba067f377cff3a966536
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import os
import uno
# Logging util for debugging when run from within LibreOffice.
def log(data):
# Append to a log file in the user's home directory ('~' must be expanded
# explicitly, since open() does not do it).
f = open(os.path.expanduser('~/log.txt'), 'a')
f.write(data + '\n')
f.close()
def getDesktop():
# If being run from inside LibreOffice, XSCRIPTCONTEXT will be defined
if 'XSCRIPTCONTEXT' in globals():
return XSCRIPTCONTEXT.getDesktop()
# Otherwise, if we're running from the command line, we have to connect to
# a running instance of LibreOffice. LibreOffice must be started to listen
# on a socket that we connect to in order for this to work.
localContext = uno.getComponentContext()
# create the UnoUrlResolver
resolver = localContext.ServiceManager.createInstanceWithContext(
"com.sun.star.bridge.UnoUrlResolver", localContext)
# connect to the running office
ctx = resolver.resolve(
"uno:socket,host=localhost,port=2002;urp;"
"StarOffice.ComponentContext")
smgr = ctx.ServiceManager
# get the central desktop object
return smgr.createInstanceWithContext("com.sun.star.frame.Desktop", ctx)
def Underline_Words():
desktop = getDesktop()
model = desktop.getCurrentComponent()
# Remove closing "tags" that are immediately followed by opening tags.
# Otherwise there could be tiny gaps in the underlining.
apply_regex(model, 'x% ?%x', '')
apply_regex(model, '%x(.*?)x%', '$1', {'CharUnderline': 1})
def apply_regex(model, search, replace, replaceAttrs={}):
replaceDescriptor = model.createReplaceDescriptor()
props = structify(replaceAttrs)
replaceDescriptor.setReplaceAttributes(props)
replaceDescriptor.SearchRegularExpression = True
replaceDescriptor.SearchString = search
replaceDescriptor.ReplaceString = replace
numReplaced = model.replaceAll(replaceDescriptor)
return numReplaced
def structify(keypairs):
result = []
for key, value in keypairs.items():
struct = uno.createUnoStruct('com.sun.star.beans.PropertyValue')
struct.Name = key
struct.Value = value
result.append(struct)
return tuple(result)
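# Hedged helper sketch, not part of the original macro: when the macro is driven from the
# command line, LibreOffice must already be listening on the socket used in getDesktop().
# The 'soffice' binary name below is an assumption and may differ per installation.
def start_listening_office():
    import subprocess
    return subprocess.Popen([
        'soffice',
        '--accept=socket,host=localhost,port=2002;urp;',
        '--norestore',
    ])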
g_exportedScripts = (Underline_Words, )
if __name__ == "__main__":
Underline_Words()
| 30.459459
| 78
| 0.700532
|
06f8766c417a71951bcd7909236a6e6638046f1a
| 368
|
py
|
Python
|
labs/lab10/ex1/ex1.py
|
jamestiotio/dbsys
|
26f545a51626ce232c0dc26b70ef206e71b273fc
|
[
"MIT"
] | null | null | null |
labs/lab10/ex1/ex1.py
|
jamestiotio/dbsys
|
26f545a51626ce232c0dc26b70ef206e71b273fc
|
[
"MIT"
] | null | null | null |
labs/lab10/ex1/ex1.py
|
jamestiotio/dbsys
|
26f545a51626ce232c0dc26b70ef206e71b273fc
|
[
"MIT"
] | null | null | null |
import sys
if sys.version_info[0] == 3:
from functools import reduce
else:
pass
def min(l):
return reduce(lambda x, y: x if x < y else y, l, sys.maxsize)
def max(l):
return reduce(lambda x, y: x if x > y else y, l, -sys.maxsize - 1)
if __name__ == "__main__":
assert min([32, 63, 7, 10, 100]) == 7
assert max([32, 63, 7, 10, 100]) == 100
| 19.368421
| 70
| 0.592391
|
a2ae6bcfe0f18ddf6a1811247e8e8a8b79ff7585
| 5,598
|
py
|
Python
|
src/computer_vision/training/transfer_classifier.py
|
mldiego/Platooning-F1Tenth
|
ec5eadb137da8428642b3ffd1b8ca31fde4f6dff
|
[
"MIT"
] | null | null | null |
src/computer_vision/training/transfer_classifier.py
|
mldiego/Platooning-F1Tenth
|
ec5eadb137da8428642b3ffd1b8ca31fde4f6dff
|
[
"MIT"
] | null | null | null |
src/computer_vision/training/transfer_classifier.py
|
mldiego/Platooning-F1Tenth
|
ec5eadb137da8428642b3ffd1b8ca31fde4f6dff
|
[
"MIT"
] | null | null | null |
# import the necessary packages
# import sys so we can use packages outside of this folder in
# either python 2 or python 3, I know it's janky, chill
import sys
import os
from pathlib import Path
#insert parent directory into the path
sys.path.insert(0,str(Path(os.path.abspath(__file__)).parent.parent))
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from nn.conv.fcheadnet import FCHeadNet
from preprocessing.utils import ImageUtils
# for data augmentation
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
#optimizers
from keras.optimizers import RMSprop
from keras.optimizers import SGD
from keras.applications import VGG16
from keras.utils import plot_model
from keras.applications import imagenet_utils
# layers, model
from keras.layers import Input
from keras.models import Model
# customary packages
import numpy as np
import argparse
import os
import cv2
# construct the argument parser
ap = argparse.ArgumentParser()
ap.add_argument("-d","--dataset",required=True,help="path to input dataset")
ap.add_argument("-m","--model",required=True,help="path to save the output model")
args = vars(ap.parse_args())
# initialize some constants
HEAD_TUNING_EPOCHS = 25
FINAL_TUNING_EPOCHS = 100
BATCH_SIZE = 32
HEIGHT= 224
WIDTH= 224
# define the image augmentation generator
aug= ImageDataGenerator(rotation_range=5, brightness_range=[0.5,1.5], zoom_range=[0.9,1.1],rescale=1.0/255.0,fill_mode="nearest")
#load the data
iu=ImageUtils()
data,labels=iu.load_from_directory(args['dataset'],HEIGHT,WIDTH,verbose=1)
print("[INFO] Imagenet preprocessing")
training_images=[]
#normalize the images
count = 1
for img in np.copy(data):
processed_image=imagenet_utils.preprocess_input(img)
training_images.append(processed_image)
if count % 100 == 0:
print("Processed {} Images".format(count))
count +=1
data=np.asarray(training_images)
#convert the labels from integers to vectors
lb = LabelBinarizer()
labels=lb.fit_transform(labels)
# since there is not an equal number of images in each category, let's compute the class totals
classTotals = labels.sum(axis=0).astype('float')
classWeight = classTotals.max() / classTotals
print(lb.classes_,classTotals,classWeight)
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
#classification data
(trainX, testX, trainY, testY) = train_test_split(data,labels, test_size=0.20, stratify=labels, random_state=42)
# now comes the main event, the network surgery
baseModel = VGG16(weights='imagenet',include_top=False,input_tensor=Input(shape=(224,224,3)))
print(baseModel.summary())
# initialize the new head of the network, a set of FC layers
# followed by a softmax classifier
headModel=FCHeadNet.build(baseModel,testY.shape[1],256)
# place the head FC model on top of the base model -- this will
# become the actual model we will train
model = Model(inputs=baseModel.input,outputs=headModel)
print(model.summary())
plot_model(model, to_file='../plots/model.png')
# loop over all layers in the base model and freeze them so they
# will *not* be updated during the training process.
for layer in baseModel.layers:
layer.trainable = False
# compile our model. This needs to be done after setting our
# layers to non-trainable
print("[INFO] compiling model...")
opt = RMSprop(lr=0.001)
model.compile(loss='categorical_crossentropy',optimizer=opt, metrics=['accuracy'])
# train the head of the network for a few epochs (all other layers
# are frozen) -- this will allow the new FC layers to start to become
# initialized with actual learned values versus being purely random
print ('[INFO] training head...')
model.fit_generator(aug.flow(trainX,trainY,batch_size=BATCH_SIZE),
validation_data=(testX,testY),epochs=HEAD_TUNING_EPOCHS,
steps_per_epoch=len(trainX)//HEAD_TUNING_EPOCHS,verbose=1)
# evaluate the network after initialization
print("[INFO] evaluating after initialization")
predictions = model.predict(testX, batch_size = BATCH_SIZE)
print(classification_report(testY.argmax(axis=1),predictions.argmax(axis=1),target_names=lb.classes_))
# now that the head FC layers have been trained/initialized, lets
# unfreeze the final set of CONV layers and make them trainable
# the only reason that we didn't unfreeze the whole thing is that
# VGG is a deep architecture; if classification accuracy continues
# to improve (without overfitting), you may consider unfreezing more
# layers in the body
for layer in baseModel.layers[15:]:
layer.trainable = True
# for the changes to the model to take effect we need to recompile the
# model, this time using SGD with a *very* small learning rate
print("[INFO] re-compiling model...")
opt = SGD(lr=0.001)
model.compile(loss='categorical_crossentropy',optimizer=opt,metrics=['accuracy'])
print ('[INFO] fine-tuning model...')
model.fit_generator(aug.flow(trainX,trainY,batch_size=BATCH_SIZE),
validation_data=(testX,testY),epochs=FINAL_TUNING_EPOCHS,
steps_per_epoch=len(trainX)//FINAL_TUNING_EPOCHS,verbose=1)
# evaluate the network after initialization
print("[INFO] evaluating after fine-tuning")
predictions = model.predict(testX, batch_size = BATCH_SIZE)
print(classification_report(testY.argmax(axis=1),predictions.argmax(axis=1),target_names=lb.classes_))
# save the model to disk
print("[INFO] serializing model...")
model.save(args['model'])
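# Hedged follow-up sketch, not part of the original script: reload the serialized model to
# confirm it restores cleanly, reusing args['model'], testX and BATCH_SIZE defined above.
from keras.models import load_model
reloaded_model = load_model(args['model'])
reloaded_predictions = reloaded_model.predict(testX, batch_size=BATCH_SIZE)
print("[INFO] reloaded model prediction shape: {}".format(reloaded_predictions.shape))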
| 35.884615
| 129
| 0.770096
|
d52566b12cc891708dbf566aedfc3818d03677f4
| 18,211
|
py
|
Python
|
scout/server/blueprints/variants/views.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | null | null | null |
scout/server/blueprints/variants/views.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | null | null | null |
scout/server/blueprints/variants/views.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | null | null | null |
"""Views for the variants"""
import datetime
import io
import logging
import os.path
import pathlib
import shutil
import zipfile
import pymongo
from flask import (
Blueprint,
abort,
current_app,
flash,
redirect,
request,
send_file,
session,
url_for,
)
from flask_login import current_user
from scout.constants import (
CANCER_TIER_OPTIONS,
MANUAL_RANK_OPTIONS,
SEVERE_SO_TERMS,
DISMISS_VARIANT_OPTIONS,
CANCER_SPECIFIC_VARIANT_DISMISS_OPTIONS,
)
from scout.server.extensions import store
from scout.server.utils import institute_and_case, templated
from . import controllers
from .forms import CancerFiltersForm, FiltersForm, StrFiltersForm, SvFiltersForm
LOG = logging.getLogger(__name__)
variants_bp = Blueprint(
"variants",
__name__,
template_folder="templates",
static_folder="static",
static_url_path="/variants/static",
)
@variants_bp.route("/<institute_id>/<case_name>/variants", methods=["GET", "POST"])
@templated("variants/variants.html")
def variants(institute_id, case_name):
"""Display a list of SNV variants."""
page = int(request.form.get("page", 1))
category = "snv"
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
variant_type = request.args.get("variant_type", "clinical")
if request.form.get("hpo_clinical_filter"):
case_obj["hpo_clinical_filter"] = True
user_obj = store.user(current_user.email)
if request.method == "POST":
if request.form.getlist("dismiss"): # dismiss a list of variants
controllers.dismiss_variant_list(
store,
institute_obj,
case_obj,
"variant.variant",
request.form.getlist("dismiss"),
request.form.getlist("dismiss_choices"),
)
form = controllers.populate_filters_form(
store, institute_obj, case_obj, user_obj, category, request.form
)
else:
form = FiltersForm(request.args)
# set form variant data type the first time around
form.variant_type.data = variant_type
# set chromosome to all chromosomes
form.chrom.data = request.args.get("chrom", "")
if form.gene_panels.data == [] and variant_type == "clinical":
form.gene_panels.data = controllers.case_default_panels(case_obj)
# populate filters dropdown
available_filters = store.filters(institute_id, category)
form.filters.choices = [
(filter.get("_id"), filter.get("display_name")) for filter in available_filters
]
# populate available panel choices
form.gene_panels.choices = controllers.gene_panel_choices(institute_obj, case_obj)
# update status of case if visited for the first time
controllers.activate_case(store, institute_obj, case_obj, current_user)
# upload gene panel if symbol file exists
if request.files:
file = request.files[form.symbol_file.name]
if request.files and file and file.filename != "":
LOG.debug("Upload file request files: {0}".format(request.files.to_dict()))
try:
stream = io.StringIO(file.stream.read().decode("utf-8"), newline=None)
except UnicodeDecodeError as error:
flash("Only text files are supported!", "warning")
return redirect(request.referrer)
hgnc_symbols_set = set(form.hgnc_symbols.data)
LOG.debug("Symbols prior to upload: {0}".format(hgnc_symbols_set))
new_hgnc_symbols = controllers.upload_panel(store, institute_id, case_name, stream)
hgnc_symbols_set.update(new_hgnc_symbols)
form.hgnc_symbols.data = hgnc_symbols_set
# reset gene panels
form.gene_panels.data = ""
# check if supplied gene symbols exist
hgnc_symbols = []
non_clinical_symbols = []
not_found_symbols = []
not_found_ids = []
if (form.hgnc_symbols.data) and len(form.hgnc_symbols.data) > 0:
is_clinical = form.data.get("variant_type", "clinical") == "clinical"
clinical_symbols = store.clinical_symbols(case_obj) if is_clinical else None
for hgnc_symbol in form.hgnc_symbols.data:
if hgnc_symbol.isdigit():
hgnc_gene = store.hgnc_gene(int(hgnc_symbol), case_obj["genome_build"])
if hgnc_gene is None:
not_found_ids.append(hgnc_symbol)
else:
hgnc_symbols.append(hgnc_gene["hgnc_symbol"])
elif sum(1 for i in store.hgnc_genes(hgnc_symbol)) == 0:
not_found_symbols.append(hgnc_symbol)
elif is_clinical and (hgnc_symbol not in clinical_symbols):
non_clinical_symbols.append(hgnc_symbol)
else:
hgnc_symbols.append(hgnc_symbol)
if not_found_ids:
flash("HGNC id not found: {}".format(", ".join(not_found_ids)), "warning")
if not_found_symbols:
flash("HGNC symbol not found: {}".format(", ".join(not_found_symbols)), "warning")
if non_clinical_symbols:
flash(
"Gene not included in clinical list: {}".format(", ".join(non_clinical_symbols)),
"warning",
)
form.hgnc_symbols.data = hgnc_symbols
# handle HPO gene list separately
if "hpo" in form.data["gene_panels"]:
hpo_symbols = list(
set(term_obj["hgnc_symbol"] for term_obj in case_obj["dynamic_gene_list"])
)
current_symbols = set(hgnc_symbols)
current_symbols.update(hpo_symbols)
form.hgnc_symbols.data = list(current_symbols)
cytobands = store.cytoband_by_chrom(case_obj.get("genome_build"))
variants_query = store.variants(case_obj["_id"], query=form.data, category=category)
result_size = store.count_variants(case_obj["_id"], form.data, None, category)
# Setup variant count session with variant count by category
controllers.variant_count_session(store, institute_id, case_obj["_id"], variant_type, category)
session["filtered_variants"] = result_size
if request.form.get("export"):
return controllers.download_variants(store, case_obj, variants_query)
data = controllers.variants(store, institute_obj, case_obj, variants_query, result_size, page)
return dict(
institute=institute_obj,
case=case_obj,
form=form,
manual_rank_options=MANUAL_RANK_OPTIONS,
dismiss_variant_options=DISMISS_VARIANT_OPTIONS,
cancer_tier_options=CANCER_TIER_OPTIONS,
severe_so_terms=SEVERE_SO_TERMS,
cytobands=cytobands,
page=page,
expand_search=str(request.method == "POST"),
**data,
)
@variants_bp.route("/<institute_id>/<case_name>/str/variants")
@templated("variants/str-variants.html")
def str_variants(institute_id, case_name):
"""Display a list of STR variants."""
page = int(request.args.get("page", 1))
variant_type = request.args.get("variant_type", "clinical")
category = "str"
form = StrFiltersForm(request.args)
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
controllers.activate_case(store, institute_obj, case_obj, current_user)
query = form.data
query["variant_type"] = variant_type
variants_query = store.variants(case_obj["_id"], category=category, query=query).sort(
[
("str_repid", pymongo.ASCENDING),
("chromosome", pymongo.ASCENDING),
("position", pymongo.ASCENDING),
]
)
data = controllers.str_variants(store, institute_obj, case_obj, variants_query, page)
return dict(
institute=institute_obj,
case=case_obj,
dismiss_variant_options=DISMISS_VARIANT_OPTIONS,
variant_type=variant_type,
manual_rank_options=MANUAL_RANK_OPTIONS,
form=form,
page=page,
**data,
)
@variants_bp.route("/<institute_id>/<case_name>/sv/variants", methods=["GET", "POST"])
@templated("variants/sv-variants.html")
def sv_variants(institute_id, case_name):
"""Display a list of structural variants."""
page = int(request.form.get("page", 1))
variant_type = request.args.get("variant_type", "clinical")
category = "sv"
# Define case and institute objects
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
if request.form.get("hpo_clinical_filter"):
case_obj["hpo_clinical_filter"] = True
if request.form.getlist("dismiss"): # dismiss a list of variants
controllers.dismiss_variant_list(
store,
institute_obj,
case_obj,
"variant.sv_variant",
request.form.getlist("dismiss"),
request.form.getlist("dismiss_choices"),
)
# update status of case if visited for the first time
controllers.activate_case(store, institute_obj, case_obj, current_user)
form = controllers.populate_sv_filters_form(store, institute_obj, case_obj, category, request)
cytobands = store.cytoband_by_chrom(case_obj.get("genome_build"))
variants_query = store.variants(case_obj["_id"], category=category, query=form.data)
# Setup variant count session with variant count by category
controllers.variant_count_session(store, institute_id, case_obj["_id"], variant_type, category)
result_size = store.count_variants(case_obj["_id"], form.data, None, category)
session["filtered_variants"] = result_size
# if variants should be exported
if request.form.get("export"):
return controllers.download_variants(store, case_obj, variants_query)
data = controllers.sv_variants(
store, institute_obj, case_obj, variants_query, result_size, page
)
return dict(
institute=institute_obj,
case=case_obj,
dismiss_variant_options=DISMISS_VARIANT_OPTIONS,
variant_type=variant_type,
form=form,
cytobands=cytobands,
severe_so_terms=SEVERE_SO_TERMS,
manual_rank_options=MANUAL_RANK_OPTIONS,
page=page,
expand_search=str(request.method == "POST"),
**data,
)
@variants_bp.route("/<institute_id>/<case_name>/cancer/variants", methods=["GET", "POST"])
@templated("variants/cancer-variants.html")
def cancer_variants(institute_id, case_name):
"""Show cancer variants overview."""
category = "cancer"
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
user_obj = store.user(current_user.email)
if request.method == "POST":
if request.form.getlist("dismiss"): # dismiss a list of variants
controllers.dismiss_variant_list(
store,
institute_obj,
case_obj,
"variant.variant",
request.form.getlist("dismiss"),
request.form.getlist("dismiss_choices"),
)
form = controllers.populate_filters_form(
store, institute_obj, case_obj, user_obj, category, request.form
)
# if user is not loading an existing filter, check filter form
if request.form.get("load_filter") is None and form.validate_on_submit() is False:
# Flash a message with errors
for field, err_list in form.errors.items():
for err in err_list:
flash(f"Content of field '{field}' has not a valid format", "warning")
# And do not submit the form
return redirect(
url_for(
".cancer_variants",
institute_id=institute_id,
case_name=case_name,
expand_search="True",
)
)
page = int(request.form.get("page", 1))
else:
page = int(request.args.get("page", 1))
form = CancerFiltersForm(request.args)
# set chromosome to all chromosomes
form.chrom.data = request.args.get("chrom", "")
if form.gene_panels.data == []:
form.gene_panels.data = controllers.case_default_panels(case_obj)
# update status of case if visited for the first time
controllers.activate_case(store, institute_obj, case_obj, current_user)
# populate filters dropdown
available_filters = store.filters(institute_id, category)
form.filters.choices = [
(filter.get("_id"), filter.get("display_name")) for filter in available_filters
]
form.gene_panels.choices = controllers.gene_panel_choices(institute_obj, case_obj)
cytobands = store.cytoband_by_chrom(case_obj.get("genome_build"))
variant_type = request.args.get("variant_type", "clinical")
variants_query = store.variants(case_obj["_id"], category="cancer", query=form.data)
result_size = store.count_variants(case_obj["_id"], form.data, None, category)
if request.form.get("export"):
return controllers.download_variants(store, case_obj, variants_query)
data = controllers.cancer_variants(
store, institute_id, case_name, variants_query, result_size, form, page=page
)
return dict(
variant_type=variant_type,
cytobands=cytobands,
dismiss_variant_options={
**DISMISS_VARIANT_OPTIONS,
**CANCER_SPECIFIC_VARIANT_DISMISS_OPTIONS,
},
expand_search=str(request.method == "POST"),
**data,
)
@variants_bp.route("/<institute_id>/<case_name>/cancer/sv-variants", methods=["GET", "POST"])
@templated("variants/cancer-sv-variants.html")
def cancer_sv_variants(institute_id, case_name):
"""Display a list of cancer structural variants."""
page = int(request.form.get("page", 1))
variant_type = request.args.get("variant_type", "clinical")
category = "cancer_sv"
# Define case and institute objects
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
if request.form.get("hpo_clinical_filter"):
case_obj["hpo_clinical_filter"] = True
# update status of case if visited for the first time
controllers.activate_case(store, institute_obj, case_obj, current_user)
form = controllers.populate_sv_filters_form(store, institute_obj, case_obj, category, request)
cytobands = store.cytoband_by_chrom(case_obj.get("genome_build"))
variants_query = store.variants(case_obj["_id"], category=category, query=form.data)
# Setup variant count session with variant count by category
controllers.variant_count_session(store, institute_id, case_obj["_id"], variant_type, category)
result_size = store.count_variants(case_obj["_id"], form.data, None, category)
session["filtered_variants"] = result_size
# if variants should be exported
if request.form.get("export"):
return controllers.download_variants(store, case_obj, variants_query)
data = controllers.sv_variants(
store, institute_obj, case_obj, variants_query, result_size, page
)
return dict(
institute=institute_obj,
case=case_obj,
dismiss_variant_options={
**DISMISS_VARIANT_OPTIONS,
**CANCER_SPECIFIC_VARIANT_DISMISS_OPTIONS,
},
variant_type=variant_type,
form=form,
severe_so_terms=SEVERE_SO_TERMS,
cancer_tier_options=CANCER_TIER_OPTIONS,
manual_rank_options=MANUAL_RANK_OPTIONS,
cytobands=cytobands,
page=page,
expand_search=str(request.method == "POST"),
**data,
)
@variants_bp.route("/<institute_id>/<case_name>/upload", methods=["POST"])
def upload_panel(institute_id, case_name):
"""Parse gene panel file and fill in HGNC symbols for filter."""
panel_file = request.form.symbol_file.data
if panel_file.filename == "":
flash("No selected file", "warning")
return redirect(request.referrer)
try:
stream = io.StringIO(panel_file.stream.read().decode("utf-8"), newline=None)
except UnicodeDecodeError as error:
flash("Only text files are supported!", "warning")
return redirect(request.referrer)
category = request.args.get("category")
if category == "sv":
form = SvFiltersForm(request.args)
else:
form = FiltersForm(request.args)
hgnc_symbols = set(form.hgnc_symbols.data)
new_hgnc_symbols = controllers.upload_panel(store, institute_id, case_name, stream)
hgnc_symbols.update(new_hgnc_symbols)
form.hgnc_symbols.data = ",".join(hgnc_symbols)
# reset gene panels
form.gene_panels.data = ""
# HTTP redirect code 307 asks the browser to preserve the request method (POST).
if category == "sv":
return redirect(
url_for(".sv_variants", institute_id=institute_id, case_name=case_name, **form.data),
code=307,
)
return redirect(
url_for(".variants", institute_id=institute_id, case_name=case_name, **form.data), code=307
)
@variants_bp.route("/verified", methods=["GET"])
def download_verified():
"""Download all verified variants for user's cases"""
user_obj = store.user(current_user.email)
user_institutes = user_obj.get("institutes")
temp_excel_dir = os.path.join(variants_bp.static_folder, "verified_folder")
os.makedirs(temp_excel_dir, exist_ok=True)
written_files = controllers.verified_excel_file(store, user_institutes, temp_excel_dir)
if written_files:
today = datetime.datetime.now().strftime("%Y-%m-%d")
# zip the files on the fly and serve the archive to the user
data = io.BytesIO()
with zipfile.ZipFile(data, mode="w") as z:
for f_name in pathlib.Path(temp_excel_dir).iterdir():
z.write(f_name, os.path.basename(f_name))
data.seek(0)
# remove temp folder with excel files in it
shutil.rmtree(temp_excel_dir)
return send_file(
data,
mimetype="application/zip",
as_attachment=True,
attachment_filename="_".join(["scout", "verified_variants", today]) + ".zip",
cache_timeout=0,
)
flash("No verified variants could be exported for user's institutes", "warning")
return redirect(request.referrer)
| 37.165306
| 99
| 0.670419
|
e4499448b98c0fc26b314d9cfb8ac093d03d0cef
| 4,642
|
py
|
Python
|
inertia/views.py
|
zodman/inertia-django
|
a8515083371baf649982bd6bb0dddebac723143b
|
[
"Unlicense"
] | 77
|
2020-04-11T14:27:21.000Z
|
2022-03-31T16:16:42.000Z
|
inertia/views.py
|
zodman/inertia-django
|
a8515083371baf649982bd6bb0dddebac723143b
|
[
"Unlicense"
] | 13
|
2020-05-28T19:28:20.000Z
|
2022-02-11T10:19:59.000Z
|
inertia/views.py
|
zodman/inertia-django
|
a8515083371baf649982bd6bb0dddebac723143b
|
[
"Unlicense"
] | 19
|
2020-04-16T06:19:34.000Z
|
2022-01-23T17:33:49.000Z
|
import json
from inspect import signature
from django.core.exceptions import ImproperlyConfigured
from django.http.response import HttpResponse, HttpResponseRedirect
from django.views.generic import View
from django.views.generic.list import BaseListView
from django.views.generic.detail import BaseDetailView
from django.template.response import TemplateResponse
from django.shortcuts import render
from django.http import JsonResponse
from django.middleware import csrf
from django.urls import get_callable
from .share import share
from .version import get_version
from django.views.generic import View
from django.conf import settings
from django.core import serializers
from django.forms.models import model_to_dict
import logging
log = logging.getLogger(__name__)
def load_lazy_props(d, request):
for k, v in d.items():
if isinstance(v, dict):
load_lazy_props(v, request)
elif callable(v):
# evaluate prop and pass request if prop accept it
if len(signature(v).parameters) > 0:
d[k] = v(request)
else:
d[k] = v()
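# Hedged usage sketch, added for illustration only (DummyRequest is a stand-in,
# not part of this package): callables in a props dict are replaced by their
# return values, and callables that accept an argument receive the request.
def _demo_load_lazy_props():
    class DummyRequest:
        def get_full_path(self):
            return "/demo"
    props = {"year": lambda: 2020,
             "url": lambda request: request.get_full_path()}
    load_lazy_props(props, DummyRequest())
    return props  # {"year": 2020, "url": "/demo"}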
def _build_context(component_name, props, version, url):
context = {
"page": {
"version": version,
'url': url,
"component": component_name,
"props": props
},
}
return context
def render_inertia(request, component_name, props=None, template_name=None):
"""
    Renders either an HttpResponse or a JsonResponse of a component, for
    use in an InertiaJS frontend integration.
"""
inertia_template = None
inertia_template = getattr(settings, "INERTIA_TEMPLATE", "base.html")
if template_name is not None:
inertia_template = template_name
if inertia_template is None:
raise ImproperlyConfigured(
"No Inertia template found. Either set INERTIA_TEMPLATE"
"in settings.py or pass template parameter."
)
# share custom data or default authenticated user
share_method_path = getattr(settings, "INERTIA_SHARE", "inertia.share.share_auth")
if share_method_path:
share_method = get_callable(share_method_path)
share_method(request)
if props is None:
props = {}
shared = {}
if hasattr(request, "session"):
for k, v in request.session.get("share", {}).items():
log.debug((k,v))
shared[k]=v
props.update(shared)
request.session['share']['flash'] = {'success': None, 'error': None}
request.session['share']['errors'] = {}
for key in ("success", "error", "errors"):
if hasattr(request, "session") and request.session.get(key):
del request.session[key]
# subsequent renders
inertia_version = get_version()
is_version_correct = 'X-Inertia-Version' in request.headers and \
request.headers["X-Inertia-Version"] == str(inertia_version)
# check if partial reload is requested
only_props = request.headers.get("X-Inertia-Partial-Data", [])
if (
only_props
and request.headers.get("X-Inertia-Partial-Component", "") == component_name
):
_props = {}
for key in props:
if key in only_props:
_props.update({key: props[key]})
else:
_props = props
# lazy load props and make request available to props being lazy loaded
load_lazy_props(_props, request)
if 'X-Inertia' in request.headers:
response = JsonResponse({
"component": component_name,
"props": _props,
"version": inertia_version,
"url": request.get_full_path()
})
response['X-Inertia'] = True
response['Vary'] = 'Accept'
return response
context = _build_context(component_name, _props,
inertia_version,
url=request.get_full_path())
return render(request, inertia_template, context)
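# Hedged usage sketch (the component name "Users/Index" and the props below are
# illustrative, not part of this package): a plain Django view simply delegates
# to render_inertia, and callable props stay lazy until the page is rendered.
def _example_users_index(request):
    props = {"now": lambda: "2020-01-01",
             "path": lambda req: req.get_full_path()}
    return render_inertia(request, "Users/Index", props=props)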
def location(url):
"""Redirect to an external website, or even another non-Inertia endpoint in your app,
within an Inertia request."""
response = HttpResponse(status=409)
response["X-Inertia-Location"] = url
return response
class InertiaMixin:
component_name = ""
props = None
def get_data(self, context):
return context
def render_to_response(self, context, **kwargs):
if self.props is None:
self.props = {}
self.props.update(self.get_data(context))
return render_inertia(self.request, self.component_name, self.props, self.template_name)
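# Hedged usage sketch (UserListView, the User model and the component name are
# assumptions for illustration, not part of this package): the mixin is combined
# with a regular class-based view, and get_data() decides which context keys
# become Inertia props.
#
#     class UserListView(InertiaMixin, BaseListView):
#         model = User
#         component_name = "Users/Index"
#         def get_data(self, context):
#             return {"users": [model_to_dict(u) for u in context["object_list"]]}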
| 31.794521
| 96
| 0.647997
|
668e03dc90e20dd48cf7d41300bcd7a1f6ed3182
| 1,122
|
py
|
Python
|
acscore/metrics.py
|
CheckiePy/CheckiePyCore
|
475ae75c4a30e4d7444fef218ffa7928fd4df4e2
|
[
"MIT"
] | 2
|
2017-08-27T15:04:09.000Z
|
2017-08-27T21:05:44.000Z
|
acscore/metrics.py
|
acsproj/acscore
|
475ae75c4a30e4d7444fef218ffa7928fd4df4e2
|
[
"MIT"
] | 2
|
2017-07-27T13:51:44.000Z
|
2017-08-05T06:54:10.000Z
|
acscore/metrics.py
|
CheckiePy/CheckiePyCore
|
475ae75c4a30e4d7444fef218ffa7928fd4df4e2
|
[
"MIT"
] | 3
|
2017-05-31T21:20:18.000Z
|
2017-08-03T20:16:47.000Z
|
from .metric.file_length import FileLength
from .metric.function_name_case import FunctionNameCase
from .metric.nesting_loops import NestingLoops
from .metric.class_name_case import ClassNameCase
from .metric.indent_type import IndentType
from .metric.quotes_type import QuotesType
from .metric.spaces_near_round_brackets import SpacesNearRoundBrackets
from .metric.spaces_near_braces import SpacesNearBraces
from .metric.spaces_near_square_brackets import SpacesNearSquareBrackets
from .metric.import_order import ImportOrder
from .metric.blank_before_function import BlankBeforeFunction
from .metric.blank_before_class import BlankBeforeClass
from .metric.blank_before_method import BlankBeforeMethod
IMPLEMENTED_METRICS = [
FileLength.__name__,
FunctionNameCase.__name__,
NestingLoops.__name__,
ClassNameCase.__name__,
IndentType.__name__,
QuotesType.__name__,
SpacesNearRoundBrackets.__name__,
SpacesNearBraces.__name__,
SpacesNearSquareBrackets.__name__,
ImportOrder.__name__,
BlankBeforeFunction.__name__,
BlankBeforeClass.__name__,
BlankBeforeMethod.__name__,
]
| 37.4
| 72
| 0.842246
|
dce6e5c8dc0bbaeec1e096d34aa98f71cba7ac2a
| 434
|
py
|
Python
|
instagram_api/response/direct_share_inbox.py
|
Yuego/instagram_api
|
b53f72db36c505a2eb24ebac1ba8267a0cc295bb
|
[
"MIT"
] | 13
|
2019-08-07T21:24:34.000Z
|
2020-12-12T12:23:50.000Z
|
instagram_api/response/direct_share_inbox.py
|
Yuego/instagram_api
|
b53f72db36c505a2eb24ebac1ba8267a0cc295bb
|
[
"MIT"
] | null | null | null |
instagram_api/response/direct_share_inbox.py
|
Yuego/instagram_api
|
b53f72db36c505a2eb24ebac1ba8267a0cc295bb
|
[
"MIT"
] | null | null | null |
from .mapper import ApiResponse, ApiResponseInterface
from .mapper.types import Timestamp, AnyType
__all__ = ['DirectShareInboxResponse']
class DirectShareInboxResponseInterface(ApiResponseInterface):
shares: AnyType
max_id: str
new_shares: AnyType
patches: AnyType
last_counted_at: AnyType
new_shares_info: AnyType
class DirectShareInboxResponse(ApiResponse, DirectShareInboxResponseInterface):
pass
| 24.111111
| 79
| 0.799539
|
d27465e7e2b79ba732ca99e7cbc3e169d683dd79
| 11,678
|
py
|
Python
|
core/storage/file/gae_models_test.py
|
sagangwee/oppia
|
c4bf0673b4d3ec30cff609109241656f71a63a82
|
[
"Apache-2.0"
] | 3
|
2021-06-26T09:45:19.000Z
|
2021-11-17T11:11:39.000Z
|
core/storage/file/gae_models_test.py
|
sagangwee/oppia
|
c4bf0673b4d3ec30cff609109241656f71a63a82
|
[
"Apache-2.0"
] | 7
|
2019-08-20T08:30:43.000Z
|
2022-02-12T18:47:57.000Z
|
core/storage/file/gae_models_test.py
|
ledriod/oppia
|
4f8f95c6689cd36f0b65672b80d98a3463b001f8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.storage.file.gae_models."""
from core.platform import models
from core.tests import test_utils
import feconf
(file_models, base_models) = models.Registry.import_models(
[models.NAMES.file, models.NAMES.base_model])
class FileMetadataModelTest(test_utils.GenericTestBase):
"""Tests the FileMetadataModel class."""
def test_get_new_id_raises_not_implemented_error(self):
with self.assertRaises(NotImplementedError):
file_models.FileMetadataModel.get_new_id('entity1')
def test_get_undeleted_with_two_undeleted_models_returns_both(self):
file_metadata_model1 = file_models.FileMetadataModel.create(
'exploration/exp_id1', 'path/to/file1.png')
file_metadata_model1.commit(feconf.SYSTEM_COMMITTER_ID, [])
file_metadata_model2 = file_models.FileMetadataModel.create(
'exploration/exp_id1', 'path/to/file2.png')
file_metadata_model2.commit(feconf.SYSTEM_COMMITTER_ID, [])
file_metadata_model_list = (
file_models.FileMetadataModel.get_undeleted())
self.assertEqual(len(file_metadata_model_list), 2)
self.assertEqual(
file_metadata_model1, file_metadata_model_list[0])
self.assertEqual(
file_metadata_model2, file_metadata_model_list[1])
def test_get_undeleted_with_a_deleted_and_undeleted_model_returns_undeleted(
self):
file_metadata_model1 = file_models.FileMetadataModel.create(
'exploration/exp_id1', 'path/to/file1.png')
file_metadata_model1.commit(feconf.SYSTEM_COMMITTER_ID, [])
file_metadata_model2 = file_models.FileMetadataModel.create(
'exploration/exp_id1', 'path/to/file2.png')
file_metadata_model2.commit(feconf.SYSTEM_COMMITTER_ID, [])
file_metadata_model2.delete(
feconf.SYSTEM_COMMITTER_ID, 'Delete second file model')
file_metadata_model_list = (
file_models.FileMetadataModel.get_undeleted())
self.assertEqual(len(file_metadata_model_list), 1)
self.assertEqual(
file_metadata_model1, file_metadata_model_list[0])
def test_create_with_exp_id_not_in_filepath_and_false_deleted_status_creates_model( # pylint: disable=line-too-long
self):
file_metadata_model = file_models.FileMetadataModel.create(
'exploration/exp_id1', 'path/to/file1.png')
self.assertFalse(file_metadata_model.deleted)
self.assertEqual(
file_metadata_model.id, '/exploration/exp_id1/path/to/file1.png')
def test_create_with_exp_id_in_filepath_and_false_deleted_status_creates_model( # pylint: disable=line-too-long
self):
file_metadata_model = file_models.FileMetadataModel.create(
'exploration/exp_id1', '/exploration/exp_id1/path/to/file1.png')
self.assertFalse(file_metadata_model.deleted)
self.assertEqual(
file_metadata_model.id, '/exploration/exp_id1/path/to/file1.png')
def test_get_model_with_model_present_returns_the_correct_model(self):
file_metadata_model = file_models.FileMetadataModel.create(
'exploration/exp_id1', 'path/to/file1.png')
file_metadata_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
retrieved_model = file_models.FileMetadataModel.get_model(
'exploration/exp_id1', 'path/to/file1.png')
self.assertEqual(retrieved_model, file_metadata_model)
def test_get_model_non_strict_with_no_model_present_returns_none(self):
retrieved_model = file_models.FileMetadataModel.get_model(
'exploration/exp_id1', 'path/to/file2.png')
self.assertIsNone(retrieved_model)
def test_get_model_strict_with_no_model_present_raises_error(self):
with self.assertRaisesRegexp(
base_models.BaseModel.EntityNotFoundError, (
'Entity for class FileMetadataModel with id.+?not found')):
file_models.FileMetadataModel.get_model(
'exploration/exp_id1', 'path/to/file2.png', True)
def test_get_version_with_version_present_returns_correct_model(self):
file_metadata_model = file_models.FileMetadataModel.create(
'exploration/exp_id1', 'path/to/file1.png')
file_metadata_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
file_metadata_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
retrieved_model = file_models.FileMetadataModel.get_version(
'exploration/exp_id1', 'path/to/file1.png', 1)
self.assertEqual(file_metadata_model.key, retrieved_model.key)
self.assertEqual(retrieved_model.version, 1)
retrieved_model = file_models.FileMetadataModel.get_version(
'exploration/exp_id1', 'path/to/file1.png', 2)
self.assertEqual(file_metadata_model.key, retrieved_model.key)
self.assertEqual(retrieved_model.version, 2)
def test_get_version_with_version_absent_raises_error(self):
file_metadata_model = file_models.FileMetadataModel.create(
'exploration/exp_id1', 'path/to/file1.png')
file_metadata_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
with self.assertRaisesRegexp(
base_models.BaseModel.EntityNotFoundError, (
'Entity for class FileMetadataSnapshotContentModel with id'
'.+?-2 not found')):
file_models.FileMetadataModel.get_version(
'exploration/exp_id1', 'path/to/file1.png', 2)
def test_commit_updates_version_of_stored_model(self):
file_metadata_model = file_models.FileMetadataModel.create(
'exploration/exp_id1', 'path/to/file1.png')
self.assertEqual(file_metadata_model.version, 0)
file_metadata_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
self.assertEqual(file_metadata_model.version, 1)
class FileModelTest(test_utils.GenericTestBase):
"""Tests the FileModel class."""
def test_file_model_content_is_reconstituted_correctly(self):
file_model = file_models.FileModel.create(
'exploration/exp_id1', 'path/to/file1.png')
file_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
file_model.content = 'file_contents'
commit_cmds = [{'cmd': 'edit'}]
file_model.commit(feconf.SYSTEM_COMMITTER_ID, commit_cmds)
retrieved_model = file_models.FileModel.get_version(
'exploration/exp_id1', 'path/to/file1.png', 2)
self.assertEqual(file_model.key, retrieved_model.key)
self.assertEqual(retrieved_model.content, 'file_contents')
def test_initial_file_model_has_no_content(self):
file_model = file_models.FileModel.create(
'exploration/exp_id1', 'path/to/file1.png')
self.assertIsNone(file_model.content)
def test_file_model_snapshot_includes_file_model_content(self):
file_model = file_models.FileModel.create(
'exploration/exp_id1', 'path/to/file1.png')
file_model.content = 'file_contents'
file_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
self.assertEqual(file_model.content, 'file_contents')
def test_get_new_id_raises_not_implemented_error(self):
with self.assertRaises(NotImplementedError):
file_models.FileModel.get_new_id('entity1')
def test_create_with_exp_id_not_in_filepath_and_false_deleted_status_creates_model( # pylint: disable=line-too-long
self):
file_model1 = file_models.FileModel.create(
'exploration/exp_id1', 'path/to/file1.png')
self.assertFalse(file_model1.deleted)
self.assertEqual(
file_model1.id, '/exploration/exp_id1/path/to/file1.png')
def test_create_with_exp_id_in_filepath_and_false_deleted_status_creates_model( # pylint: disable=line-too-long
self):
file_model = file_models.FileModel.create(
'exploration/exp_id1', '/exploration/exp_id1/path/to/file1.png')
self.assertFalse(file_model.deleted)
self.assertEqual(
file_model.id, '/exploration/exp_id1/path/to/file1.png')
def test_get_model_with_model_present_returns_the_correct_model(self):
file_model = file_models.FileModel.create(
'exploration/exp_id1', 'path/to/file1.png')
file_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
retrieved_model = file_models.FileModel.get_model(
'exploration/exp_id1', 'path/to/file1.png')
self.assertEqual(retrieved_model, file_model)
def test_get_model_non_strict_with_no_model_present_returns_none(self):
retrieved_model = file_models.FileModel.get_model(
'exploration/exp_id1', 'path/to/file2.png')
self.assertIsNone(retrieved_model)
    def test_get_model_strict_with_no_model_present_raises_error(self):
with self.assertRaisesRegexp(
base_models.BaseModel.EntityNotFoundError, (
'Entity for class FileModel with id.+?not found')):
file_models.FileModel.get_model(
'exploration/exp_id1', 'path/to/file2.png', True)
def test_commit_updates_version_of_stored_model(self):
file_model = file_models.FileModel.create(
'exploration/exp_id1', 'path/to/file1.png')
self.assertEqual(file_model.version, 0)
file_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
self.assertEqual(file_model.version, 1)
def test_get_version_with_version_present_returns_correct_model(self):
file_model = file_models.FileModel.create(
'exploration/exp_id1', 'path/to/file1.png')
file_model.content = 'file_contents_after_first_commit'
file_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
file_model.content = 'file_contents_after_second_commit'
file_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
retrieved_model = file_models.FileModel.get_version(
'exploration/exp_id1', 'path/to/file1.png', 1)
self.assertEqual(file_model.key, retrieved_model.key)
self.assertEqual(
retrieved_model.content, 'file_contents_after_first_commit')
self.assertEqual(retrieved_model.version, 1)
retrieved_model = file_models.FileModel.get_version(
'exploration/exp_id1', 'path/to/file1.png', 2)
self.assertEqual(file_model.key, retrieved_model.key)
self.assertEqual(
retrieved_model.content, 'file_contents_after_second_commit')
self.assertEqual(retrieved_model.version, 2)
def test_get_version_with_version_absent_raises_error(self):
file_model = file_models.FileModel.create(
'exploration/exp_id1', 'path/to/file1.png')
file_model.commit(feconf.SYSTEM_COMMITTER_ID, [])
with self.assertRaisesRegexp(
base_models.BaseModel.EntityNotFoundError, (
'Entity for class FileSnapshotContentModel with id'
'.+?-2 not found')):
file_models.FileModel.get_version(
'exploration/exp_id1', 'path/to/file1.png', 2)
| 46.158103
| 119
| 0.711081
|
8805faf68cbab58b1bfb25bb3c923dd18eed6c35
| 3,999
|
py
|
Python
|
pyta/python_ta/reporters/node_printers.py
|
AbChatt/Tweet-Analyser-Python
|
2953137b021a71d65fe6a83e6d4b87be36d4039b
|
[
"MIT"
] | null | null | null |
pyta/python_ta/reporters/node_printers.py
|
AbChatt/Tweet-Analyser-Python
|
2953137b021a71d65fe6a83e6d4b87be36d4039b
|
[
"MIT"
] | null | null | null |
pyta/python_ta/reporters/node_printers.py
|
AbChatt/Tweet-Analyser-Python
|
2953137b021a71d65fe6a83e6d4b87be36d4039b
|
[
"MIT"
] | null | null | null |
"""Specify how errors should be rendered."""
import astroid
from enum import Enum
def render_message(msg, source_lines):
"""Render a message based on type."""
renderer = CUSTOM_MESSAGES.get(msg.symbol, render_generic)
yield from renderer(msg, source_lines)
def render_generic(msg, source_lines=None):
"""Default rendering for a message."""
if hasattr(msg, 'node') and msg.node is not None:
node = msg.node
start_line, start_col = node.fromlineno, node.col_offset
end_line, end_col = node.end_lineno, node.end_col_offset
# Display up to 2 lines before node for context:
yield from render_context(start_line - 2, start_line, source_lines)
if start_line == end_line:
yield (start_line, slice(start_col, end_col), LineType.ERROR, source_lines[start_line-1])
else:
yield (start_line, slice(start_col, None), LineType.ERROR, source_lines[start_line-1])
yield from ((line, slice(None, None), LineType.ERROR, source_lines[line-1]) for line in range(start_line+1, end_line))
yield (end_line, slice(None, end_col), LineType.ERROR, source_lines[end_line-1])
# Display up to 2 lines after node for context:
yield from render_context(end_line + 1, end_line + 3, source_lines)
else:
line = msg.line
yield from render_context(line - 2, line, source_lines)
yield (line, slice(None, None), LineType.ERROR, source_lines[line - 1])
yield from render_context(line + 1, line + 3, source_lines)
def render_missing_docstring(msg, source_lines=None):
"""Render a missing docstring message"""
if isinstance(msg.node, astroid.Module):
yield (None, slice(None, None), LineType.DOCSTRING, '"""YOUR DOCSTRING HERE"""')
yield from render_context(1, 3, source_lines)
elif isinstance(msg.node, astroid.ClassDef) or isinstance(msg.node, astroid.FunctionDef):
start = msg.node.fromlineno
end = msg.node.body[0].fromlineno
yield from render_context(start, end, source_lines)
# Calculate indentation
body = source_lines[end-1]
indentation = len(body) - len(body.lstrip())
yield (None, slice(None, None), LineType.DOCSTRING,
body[:indentation] + '"""YOUR DOCSTRING HERE"""')
yield from render_context(end, end + 2, source_lines)
def render_trailing_newlines(msg, source_lines=None):
start_line = msg.line - 1
yield from render_context(start_line - 2, start_line, source_lines)
yield from ((line, slice(None, None), LineType.OTHER, source_lines[line-1])
for line in range(start_line, len(source_lines) + 1))
def render_context(start, stop, source_lines):
"""Helper for rendering context lines."""
start, stop = max(start, 1), min(stop, len(source_lines))
yield from ((line, slice(None, None), LineType.CONTEXT, source_lines[line-1])
for line in range(start, stop))
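# Hedged illustration (not used by the reporters themselves): render_context
# yields one (line number, slice, LineType.CONTEXT, text) tuple per context
# line, clamped to the bounds of the given source.
def _demo_render_context():
    source = ['x = 1', 'y = 2', 'z = 3']
    return list(render_context(1, 3, source))  # tuples for source lines 1 and 2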
def render_bad_whitespace(msg, source_lines=None):
"""Extract column information from caret position within message string"""
start, stop = None, None
last_line = msg.msg.split('\n')[-1]
if '^' in last_line:
start = last_line.index('^')
stop = start + 1
line = msg.line
yield from render_context(line - 2, line, source_lines)
yield (line, slice(start, stop), LineType.ERROR, source_lines[line - 1])
yield from render_context(line + 1, line + 3, source_lines)
CUSTOM_MESSAGES = {
'missing-docstring': render_missing_docstring,
'trailing-newlines': render_trailing_newlines,
'bad-whitespace': render_bad_whitespace,
}
class LineType(Enum):
"""An enumeration for _add_line method line types."""
ERROR = 1 # line with error
CONTEXT = 2 # non-error/other line added for context
OTHER = 3 # line included in source but not error
ELLIPSIS = 5 # code replaced with ellipsis
DOCSTRING = 6 # docstring needed warning
| 40.806122
| 130
| 0.674169
|
74618184653e401f2187f2bdfd163c694e2b2110
| 20,577
|
py
|
Python
|
cycle_gan/cycle_gan.py
|
asahi417/CycleGAN
|
44ab135f6666bd976299a2d86cc785f20c3b7860
|
[
"MIT"
] | 2
|
2019-05-20T05:14:53.000Z
|
2021-04-22T16:50:14.000Z
|
cycle_gan/cycle_gan.py
|
asahi417/CycleGAN
|
44ab135f6666bd976299a2d86cc785f20c3b7860
|
[
"MIT"
] | null | null | null |
cycle_gan/cycle_gan.py
|
asahi417/CycleGAN
|
44ab135f6666bd976299a2d86cc785f20c3b7860
|
[
"MIT"
] | 1
|
2021-02-20T09:59:24.000Z
|
2021-02-20T09:59:24.000Z
|
import os
import tensorflow as tf
import numpy as np
from .dataset_tool import tfrecord_parser
from .util import create_log
from .util_tf import generator_resnet, discriminator_patch
DATA_TYPE = ['trainB', 'trainA', 'testA', 'testB']
class CycleGAN:
"""
Only valid for images with shape [256, 256, 3]
"""
def __init__(self,
tfrecord_dir: str,
checkpoint_dir: str,
image_shape: list,
cyclic_lambda_a: float,
cyclic_lambda_b: float,
identity_lambda: float,
learning_rate: float=None,
buffer_size: int = 50,
batch: int = 10,
optimizer: str = 'sgd',
debug: bool = True,
n_thread: int = 4,
log_img_size: int = 5
):
self.__ini_learning_rate = learning_rate
self.__checkpoint_dir = checkpoint_dir
self.__checkpoint = '%s/model.ckpt' % checkpoint_dir
self.__log_img_size = log_img_size
self.__identity_lambda = identity_lambda
self.__buffer_size = buffer_size
self.__image_shape = image_shape
self.__cyclic_lambda_a = cyclic_lambda_a
self.__cyclic_lambda_b = cyclic_lambda_b
self.__base_batch = batch
self.__optimizer = optimizer
self.__logger = create_log('%s/log' % checkpoint_dir) if debug else None
self.__n_thread = n_thread
self.tfrecord_dir = tfrecord_dir
self.__build_network()
self.session = tf.Session(config=tf.ConfigProto(log_device_placement=False))
self.writer = tf.summary.FileWriter('%s/summary' % self.__checkpoint_dir, self.session.graph)
# Load model
if os.path.exists('%s.meta' % self.__checkpoint):
self.__log('load variable from %s' % self.__checkpoint)
self.__saver.restore(self.session, self.__checkpoint)
self.__warm_start = True
else:
os.makedirs(self.__checkpoint_dir, exist_ok=True)
self.session.run(tf.global_variables_initializer())
self.__warm_start = False
    def __tfrecord(self, record_name, batch, seed=None):
seed = tf.cast(seed, tf.int64) if seed is not None else seed
data_set_api = tf.data.TFRecordDataset(record_name, compression_type='GZIP')
# convert record to tensor
data_set_api = data_set_api.map(tfrecord_parser(self.__image_shape), self.__n_thread)
# set batch size
data_set_api = data_set_api.shuffle(buffer_size=10000, seed=seed)
data_set_api = data_set_api.batch(tf.cast(batch, tf.int64))
# make iterator
iterator = tf.data.Iterator.from_structure(data_set_api.output_types, data_set_api.output_shapes)
iterator_ini = iterator.make_initializer(data_set_api)
return iterator, iterator_ini
def __build_network(self):
##########
# config #
##########
self.__tfrecord_a = tf.placeholder(tf.string, name='tfrecord_a')
self.__tfrecord_b = tf.placeholder(tf.string, name='tfrecord_b')
self.__batch = tf.placeholder_with_default(self.__base_batch, [], name='batch')
#########
# input #
#########
        img_iterator_a, self.iterator_ini_a = self.__tfrecord(self.__tfrecord_a, self.__batch)
        img_iterator_b, self.iterator_ini_b = self.__tfrecord(self.__tfrecord_b, self.__batch)
self.img_a = img_iterator_a.get_next()
self.img_b = img_iterator_b.get_next()
###############
# placeholder #
###############
img_shape = [None] + self.__image_shape
# original images from domain A and B
self.__original_img_a = tf.placeholder(tf.float32, img_shape, name='original_img_a')
self.__original_img_b = tf.placeholder(tf.float32, img_shape, name='original_img_b')
# will sampled from buffered generated images
self.__fake_img_a_buffer = tf.placeholder(tf.float32, img_shape, name='fake_img_a_buffer')
self.__fake_img_b_buffer = tf.placeholder(tf.float32, img_shape, name='fake_img_b_buffer')
self.__learning_rate = tf.placeholder_with_default(0.0, [], name='learning_rate')
original_img_a_norm = (self.__original_img_a / 255 - 0.5) * 2
original_img_b_norm = (self.__original_img_b / 255 - 0.5) * 2
#############
# generator #
#############
with tf.name_scope('generators'):
# generator from A to B
self.fake_img_b = generator_resnet(original_img_a_norm, scope='generator_a')
self.cycle_img_a = generator_resnet(self.fake_img_b, scope='generator_b')
# generator from B to A
self.fake_img_a = generator_resnet(original_img_b_norm, scope='generator_b', reuse=True)
self.cycle_img_b = generator_resnet(self.fake_img_a, scope='generator_a', reuse=True)
self.id_a = generator_resnet(original_img_a_norm, scope='generator_b', reuse=True)
self.id_b = generator_resnet(original_img_b_norm, scope='generator_a', reuse=True)
if self.__identity_lambda != 0.0:
# self.id_a = generator_resnet(original_img_a_norm, scope='generator_b', reuse=True)
id_loss_a = tf.reduce_mean(tf.abs(original_img_a_norm - self.id_a))
# self.id_b = generator_resnet(original_img_b_norm, scope='generator_a', reuse=True)
id_loss_b = tf.reduce_mean(tf.abs(original_img_b_norm - self.id_b))
else:
id_loss_a = id_loss_b = 0.0
#################
# discriminator #
#################
with tf.name_scope('discriminators'):
# logit for update generator A
logit_fake_a_generator = discriminator_patch(self.fake_img_a, scope='discriminator_a')
# logit for update generator B
logit_fake_b_generator = discriminator_patch(self.fake_img_b, scope='discriminator_b')
# logit for update discriminator A
logit_fake_a = discriminator_patch(self.__fake_img_a_buffer, scope='discriminator_a', reuse=True)
logit_original_a = discriminator_patch(original_img_a_norm, scope='discriminator_a', reuse=True)
# logit for update discriminator B
logit_fake_b = discriminator_patch(self.__fake_img_b_buffer, scope='discriminator_b', reuse=True)
logit_original_b = discriminator_patch(original_img_b_norm, scope='discriminator_b', reuse=True)
########
# loss #
########
with tf.name_scope('loss'): # adversarial loss (least square loss, known as LSGAN)
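            # Least-squares GAN (LSGAN) objective: each generator is pushed toward
            # D(G(x)) = 1 via E[(D(G(x)) - 1)^2], while each discriminator minimises
            # 0.5 * (E[D(fake)^2] + E[(D(real) - 1)^2]), matching the terms below.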
gen_loss_a = tf.reduce_mean(tf.squared_difference(logit_fake_a_generator, 1))
gen_loss_b = tf.reduce_mean(tf.squared_difference(logit_fake_b_generator, 1))
disc_loss_a = (tf.reduce_mean(tf.squared_difference(logit_fake_a, 0)) +
tf.reduce_mean(tf.squared_difference(logit_original_a, 1))) * 0.5
disc_loss_b = (tf.reduce_mean(tf.squared_difference(logit_fake_b, 0)) +
tf.reduce_mean(tf.squared_difference(logit_original_b, 1))) * 0.5
# cycle consistency loss
cycle_loss_a = tf.reduce_mean(tf.abs(original_img_a_norm - self.cycle_img_a))
cycle_loss_b = tf.reduce_mean(tf.abs(original_img_b_norm - self.cycle_img_b))
################
# optimization #
################
with tf.name_scope('optimization'):
if self.__optimizer == 'adam':
optimizer_g_a = tf.train.AdamOptimizer(self.__learning_rate)
optimizer_g_b = tf.train.AdamOptimizer(self.__learning_rate)
optimizer_d_a = tf.train.AdamOptimizer(self.__learning_rate)
optimizer_d_b = tf.train.AdamOptimizer(self.__learning_rate)
else:
raise ValueError('unknown optimizer !!')
var_gen_a = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator_a')
var_gen_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator_b')
var_disc_a = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator_a')
var_disc_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator_b')
cycle_loss = cycle_loss_a * self.__cyclic_lambda_a + cycle_loss_b * self.__cyclic_lambda_b
self.train_op_gen_a = optimizer_g_a.minimize(
gen_loss_b + cycle_loss + id_loss_b * self.__cyclic_lambda_a * self.__identity_lambda,
var_list=var_gen_a)
self.train_op_gen_b = optimizer_g_b.minimize(
gen_loss_a + cycle_loss + id_loss_a * self.__cyclic_lambda_b * self.__identity_lambda,
var_list=var_gen_b)
self.train_op_disc_a = optimizer_d_a.minimize(
disc_loss_a,
var_list=var_disc_a)
self.train_op_disc_b = optimizer_d_b.minimize(
disc_loss_b,
var_list=var_disc_b)
# logging
n_var = 0
for var in tf.trainable_variables():
sh = var.get_shape().as_list()
self.__log('%s: %s' % (var.name, str(sh)))
n_var += np.prod(sh)
self.__log('total variables: %i' % n_var)
# saver
self.__saver = tf.train.Saver()
##################
# scalar summary #
##################
def image_form(float_img):
img_int = tf.floor((float_img + 1) * 255 / 2)
return tf.cast(img_int, tf.uint8)
self.summary_hyperparameter = tf.summary.merge([
tf.summary.scalar('hyperparameter_cyclic_lambda_a', self.__cyclic_lambda_a),
tf.summary.scalar('hyperparameter_cyclic_lambda_b', self.__cyclic_lambda_b),
tf.summary.scalar('hyperparameter_image_shape_w', self.__image_shape[0]),
tf.summary.scalar('hyperparameter_image_shape_h', self.__image_shape[1]),
tf.summary.scalar('hyperparameter_image_shape_c', self.__image_shape[2]),
tf.summary.scalar('hyperparameter_buffer_size', self.__buffer_size),
tf.summary.scalar('hyperparameter_batch', self.__base_batch)
])
self.summary_train_gen_a = tf.summary.merge([
tf.summary.scalar('meta_learning_rate', self.__learning_rate),
tf.summary.scalar('loss_gen_a', gen_loss_a),
tf.summary.scalar('loss_cycle_a', cycle_loss_a),
tf.summary.scalar('loss_id_a', id_loss_b)
])
self.summary_train_gen_b = tf.summary.merge([
tf.summary.scalar('loss_gen_b', gen_loss_b),
tf.summary.scalar('loss_cycle_b', cycle_loss_b),
tf.summary.scalar('loss_id_b', id_loss_a)
])
self.summary_train_disc_a = tf.summary.merge([
tf.summary.scalar('loss_disc_a', disc_loss_a),
tf.summary.image('buffer_a', image_form(self.__fake_img_a_buffer))
])
self.summary_train_disc_b = tf.summary.merge([
tf.summary.scalar('loss_disc_b', disc_loss_b),
tf.summary.image('buffer_b', image_form(self.__fake_img_b_buffer))
])
self.summary_image = tf.summary.merge([
tf.summary.image('original_a', self.__original_img_a, self.__log_img_size),
tf.summary.image('fake_b', image_form(self.fake_img_b), self.__log_img_size),
tf.summary.image('cycled_a', image_form(self.cycle_img_a), self.__log_img_size),
tf.summary.image('original_b', self.__original_img_b, self.__log_img_size),
tf.summary.image('fake_a', image_form(self.fake_img_a), self.__log_img_size),
tf.summary.image('cycled_b', image_form(self.cycle_img_b), self.__log_img_size)
])
def train(self, epoch: int, progress_interval: int = 1):
def shuffle_data(data, seed=None):
"""shuffle array along first axis"""
np.random.seed(seed)
np.random.shuffle(data)
return data
def learning_rate_scheduler(current_lr, current_epoch):
""" heuristic scheduler used in original paper """
bias = 2.0e-6
if current_epoch > 100:
return np.max([current_lr - bias, 0])
else:
return current_lr
if self.__warm_start:
meta = np.load('%s/meta.npz' % self.__checkpoint_dir)
learning_rate = meta['learning_rate']
buffer_a = meta['buffer_a']
buffer_b = meta['buffer_b']
buffer_ind = meta['buffer_ind']
ini_epoch = meta['epoch']
i_summary = meta['i_summary']
else:
learning_rate = self.__ini_learning_rate
buffer_a = np.zeros(tuple([self.__buffer_size] + self.__image_shape))
buffer_b = np.zeros(tuple([self.__buffer_size] + self.__image_shape))
buffer_ind = 0
ini_epoch = 0
i_summary = 0
        # write hyperparameters to tensorboard
sums = self.session.run(self.summary_hyperparameter)
self.writer.add_summary(sums, 0)
e = -1
for e in range(ini_epoch, ini_epoch + epoch):
self.session.run([self.iterator_ini_a, self.iterator_ini_b],
feed_dict={
self.__tfrecord_a: '%s/trainA.tfrecord' % self.tfrecord_dir,
self.__tfrecord_b: '%s/trainB.tfrecord' % self.tfrecord_dir
})
n = 0
learning_rate = learning_rate_scheduler(learning_rate, e)
# TRAIN
while True:
n += 1
try:
# get input image
img_a, img_b = self.session.run(
[self.img_a, self.img_b],
feed_dict={self.__learning_rate: learning_rate})
# train generator A
summary, fake_img_a, _ = self.session.run([
self.summary_train_gen_a,
self.fake_img_a,
self.train_op_gen_a
],
feed_dict={
self.__learning_rate: learning_rate,
self.__original_img_a: img_a,
self.__original_img_b: img_b}
)
self.writer.add_summary(summary, i_summary)
# train generator B
summary, fake_img_b, _ = self.session.run([
self.summary_train_gen_b,
self.fake_img_b,
self.train_op_gen_b
],
feed_dict={
self.__learning_rate: learning_rate,
self.__original_img_a: img_a,
self.__original_img_b: img_b}
)
self.writer.add_summary(summary, i_summary)
# buffering generated images
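                    # Descriptive note: once the pool below is full, the image fed to
                    # each discriminator is drawn 50/50 from either the newest generator
                    # output or a buffer of `buffer_size` earlier fakes (the image
                    # history trick used in the original CycleGAN training setup).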
if buffer_ind > self.__buffer_size - 1:
# TODO: this works with only batch size `1`. Extend in general case
if np.random.rand() > 0.5:
sampled_fake_a = fake_img_a
sampled_fake_b = fake_img_b
else:
# sample from buffered a
buffer_a = shuffle_data(buffer_a)
sampled_fake_a = buffer_a[0:1, :, :, :]
buffer_a[0, :, :, :] = fake_img_a[0]
# sample from buffered b
buffer_b = shuffle_data(buffer_b)
sampled_fake_b = buffer_b[0:1, :, :, :]
buffer_b[0, :, :, :] = fake_img_b[0]
else:
sampled_fake_a = fake_img_a
sampled_fake_b = fake_img_b
buffer_a[buffer_ind, :, :, :] = sampled_fake_a
buffer_b[buffer_ind, :, :, :] = sampled_fake_b
buffer_ind += 1
# train discriminator A
summary, _ = self.session.run([
self.summary_train_disc_a,
self.train_op_disc_a
],
feed_dict={
self.__learning_rate: learning_rate,
self.__original_img_a: img_a,
self.__fake_img_a_buffer: sampled_fake_a}
)
self.writer.add_summary(summary, i_summary)
# train discriminator B
summary, _ = self.session.run([
self.summary_train_disc_b,
self.train_op_disc_b
],
feed_dict={
self.__learning_rate: learning_rate,
self.__original_img_b: img_b,
self.__fake_img_b_buffer: sampled_fake_b}
)
self.writer.add_summary(summary, i_summary)
if progress_interval is not None and n % progress_interval == 0:
print('epoch %i-%i\r' % (e, n), end='', flush=True)
i_summary += 1
except tf.errors.OutOfRangeError:
print()
self.__log('epoch %i:' % e)
# produce images from validation data
self.session.run([self.iterator_ini_a, self.iterator_ini_b],
feed_dict={
self.__tfrecord_a: '%s/testA.tfrecord' % self.tfrecord_dir,
self.__tfrecord_b: '%s/testB.tfrecord' % self.tfrecord_dir,
self.__batch: self.__log_img_size
})
img_a, img_b = self.session.run([self.img_a, self.img_b])
summary = self.session.run(self.summary_image,
feed_dict={self.__original_img_a: img_a, self.__original_img_b: img_b})
self.writer.add_summary(summary, e)
break
self.__saver.save(self.session, self.__checkpoint)
np.savez('%s/meta.npz' % self.__checkpoint_dir,
learning_rate=learning_rate,
buffer_a=buffer_a,
buffer_b=buffer_b,
buffer_ind=buffer_ind,
i_summary=i_summary,
epoch=e + 1)
    def generate_img(self, batch):
        """ Return generated images.
        :param batch: number of image sets to generate
        :return: list of length `batch`; each element is a list of uint8 (0-255) numpy arrays:
            [original_a, fake_from_a, cycle_a, identity_a, original_b, fake_from_b, cycle_b, identity_b]
        """
def form_img(target_array):
target_array = (target_array+1)/2*255
target_array = target_array.astype(np.uint8)
return target_array
self.session.run([self.iterator_ini_a, self.iterator_ini_b],
feed_dict={
self.__tfrecord_a: '%s/testA.tfrecord' % self.tfrecord_dir,
self.__tfrecord_b: '%s/testB.tfrecord' % self.tfrecord_dir,
self.__batch: 1
})
result = []
for b in range(batch):
img_a, img_b = self.session.run([self.img_a, self.img_b])
imgs = self.session.run([
self.fake_img_b, self.cycle_img_a, self.id_a,
self.fake_img_a, self.cycle_img_b, self.id_b
],
feed_dict={self.__original_img_a: img_a, self.__original_img_b: img_b}
)
result.append([
img_a.astype(np.uint8), form_img(imgs[0]), form_img(imgs[1]), form_img(imgs[2]),
img_b.astype(np.uint8), form_img(imgs[3]), form_img(imgs[4]), form_img(imgs[5])
])
return result
def __log(self, statement):
if self.__logger is not None:
self.__logger.info(statement)
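# Hedged usage sketch (paths and hyper-parameters below are illustrative; note
# that only the 'adam' branch of the optimizer option is implemented above):
#
#     model = CycleGAN(tfrecord_dir='./tfrecords', checkpoint_dir='./checkpoint',
#                      image_shape=[256, 256, 3], cyclic_lambda_a=10.0,
#                      cyclic_lambda_b=10.0, identity_lambda=0.5,
#                      learning_rate=2e-4, batch=1, optimizer='adam')
#     model.train(epoch=200, progress_interval=10)
#     samples = model.generate_img(batch=5)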
| 45.524336
| 118
| 0.565146
|
c609eebaf2478d42917fbf6d3029ebf5fd8da01d
| 7,615
|
py
|
Python
|
predict.py
|
The-ML-Hero/Robo-Semantic-Segmentation
|
17904ebc7d2f0f388da641140daac6f6602ef5ea
|
[
"MIT"
] | 5
|
2021-01-27T13:09:13.000Z
|
2021-01-28T12:20:11.000Z
|
predict.py
|
The-ML-Hero/Robo-Semantic-Segmentation
|
17904ebc7d2f0f388da641140daac6f6602ef5ea
|
[
"MIT"
] | null | null | null |
predict.py
|
The-ML-Hero/Robo-Semantic-Segmentation
|
17904ebc7d2f0f388da641140daac6f6602ef5ea
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from eval import eval_net
from unet import UNet
from torch.utils.tensorboard import SummaryWriter
from utils.dataset import BasicDataset,CarvanaDataset
from torch.utils.data import DataLoader, random_split
dir_img = '/content/data/data/membrane/train/image/'
dir_mask = '/content/data/data/membrane/train/label/'
dir_checkpoint = 'checkpoints/'
def train_net(net,
device,
epochs=5,
batch_size=1,
lr=0.001,
val_percent=0.1,
save_cp=True,
img_scale=0.5):
dataset = BasicDataset(dir_img, dir_mask, img_scale)
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
train, val = random_split(dataset, [n_train, n_val])
train_loader = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
val_loader = DataLoader(val, batch_size=batch_size, shuffle=False, num_workers=8, pin_memory=True, drop_last=True)
writer = SummaryWriter(comment=f'LR_{lr}_BS_{batch_size}_SCALE_{img_scale}')
global_step = 0
logging.info(f'''Starting training:
Epochs: {epochs}
Batch size: {batch_size}
Learning rate: {lr}
Training size: {n_train}
Validation size: {n_val}
Checkpoints: {save_cp}
Device: {device.type}
Images scaling: {img_scale}
''')
optimizer = optim.RMSprop(net.parameters(), lr=lr, weight_decay=1e-8, momentum=0.9)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min' if net.n_classes > 1 else 'max', patience=2)
if net.n_classes > 1:
criterion = nn.CrossEntropyLoss()
else:
criterion = nn.BCEWithLogitsLoss()
for epoch in range(epochs):
net.train()
epoch_loss = 0
with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
for batch in train_loader:
imgs = batch['image']
true_masks = batch['mask']
assert imgs.shape[1] == net.n_channels, \
f'Network has been defined with {net.n_channels} input channels, ' \
f'but loaded images have {imgs.shape[1]} channels. Please check that ' \
'the images are loaded correctly.'
imgs = imgs.to(device=device, dtype=torch.float32)
mask_type = torch.float32 if net.n_classes == 1 else torch.long
true_masks = true_masks.to(device=device, dtype=mask_type)
masks_pred = net(imgs)
loss = criterion(masks_pred, true_masks)
epoch_loss += loss.item()
writer.add_scalar('Loss/train', loss.item(), global_step)
pbar.set_postfix(**{'loss (batch)': loss.item()})
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_value_(net.parameters(), 0.1)
optimizer.step()
pbar.update(imgs.shape[0])
global_step += 1
if global_step % (n_train // (10 * batch_size)) == 0:
for tag, value in net.named_parameters():
tag = tag.replace('.', '/')
writer.add_histogram('weights/' + tag, value.data.cpu().numpy(), global_step)
writer.add_histogram('grads/' + tag, value.grad.data.cpu().numpy(), global_step)
val_score = eval_net(net, val_loader, device)
scheduler.step(val_score)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], global_step)
if net.n_classes > 1:
logging.info('Validation cross entropy: {}'.format(val_score))
writer.add_scalar('Loss/test', val_score, global_step)
else:
logging.info('Validation Dice Coeff: {}'.format(val_score))
writer.add_scalar('Dice/test', val_score, global_step)
writer.add_images('images', imgs, global_step)
if net.n_classes == 1:
writer.add_images('masks/true', true_masks, global_step)
writer.add_images('masks/pred', torch.sigmoid(masks_pred) > 0.5, global_step)
if save_cp:
try:
os.mkdir(dir_checkpoint)
logging.info('Created checkpoint directory')
except OSError:
pass
torch.save(net.state_dict(),
dir_checkpoint + f'CP_epoch{epoch + 1}.pth')
logging.info(f'Checkpoint {epoch + 1} saved !')
writer.close()
def get_args():
parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-e', '--epochs', metavar='E', type=int, default=5,
help='Number of epochs', dest='epochs')
parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=1,
help='Batch size', dest='batchsize')
parser.add_argument('-l', '--learning-rate', metavar='LR', type=float, nargs='?', default=0.0001,
help='Learning rate', dest='lr')
parser.add_argument('-f', '--load', dest='load', type=str, default=False,
help='Load model from a .pth file')
parser.add_argument('-s', '--scale', dest='scale', type=float, default=0.5,
help='Downscaling factor of the images')
parser.add_argument('-v', '--validation', dest='val', type=float, default=10.0,
help='Percent of the data that is used as validation (0-100)')
return parser.parse_args()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
args = get_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Using device {device}')
# Change here to adapt to your data
# n_channels=3 for RGB images
# n_classes is the number of probabilities you want to get per pixel
# - For 1 class and background, use n_classes=1
# - For 2 classes, use n_classes=1
# - For N > 2 classes, use n_classes=N
net = UNet(n_channels=1, n_classes=1, bilinear=True)
logging.info(f'Network:\n'
f'\t{net.n_channels} input channels\n'
f'\t{net.n_classes} output channels (classes)\n'
f'\t{"Bilinear" if net.bilinear else "Transposed conv"} upscaling')
if args.load:
net.load_state_dict(
torch.load(args.load, map_location=device)
)
logging.info(f'Model loaded from {args.load}')
net.to(device=device)
# faster convolutions, but more memory
# cudnn.benchmark = True
try:
train_net(net=net,
epochs=args.epochs,
batch_size=args.batchsize,
lr=args.lr,
device=device,
img_scale=args.scale,
val_percent=args.val / 100)
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 40.505319
| 118
| 0.580565
|
9c8e402b2290299a3cd3785de2a4be4563095858
| 4,120
|
py
|
Python
|
tests/test_preprocess_winograd.py
|
PterosDiacos/jiant
|
5aca4c5c54c4385708d3bda2d53420224ddf5dc3
|
[
"MIT"
] | null | null | null |
tests/test_preprocess_winograd.py
|
PterosDiacos/jiant
|
5aca4c5c54c4385708d3bda2d53420224ddf5dc3
|
[
"MIT"
] | null | null | null |
tests/test_preprocess_winograd.py
|
PterosDiacos/jiant
|
5aca4c5c54c4385708d3bda2d53420224ddf5dc3
|
[
"MIT"
] | null | null | null |
import csv
import os
import pandas as pd
import shutil
import tempfile
import unittest
import src.utils.retokenize as retokenize
import json
import copy
"""
Tests scripts/winograd/preprocess_winograd.py.
"""
class TestPreprocessWinograd(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.path = os.path.join(self.temp_dir, "temp_winograd_dataset.tsv")
with open(self.path, "w") as jsonfile:
            # test for indices that shouldn't be changed by tokenization
jsonfile.write(
json.dumps(
{
"text": "Members of the House clapped their hands",
"target": {
"span1_index": 0,
"span1_text": "members",
"span2_index": 5,
"span2_text": "their",
"label": True,
},
}
)
)
jsonfile.write("\n")
# test where both span indices should shift
jsonfile.write(
json.dumps(
{
"text": "Mr. Ford told me to tell you to contact him",
"target": {
"span1_index": 0,
"span1_text": "Mr. Ford",
"span2_index": 9,
"span2_text": "him",
"label": True,
},
}
)
)
jsonfile.write("\n")
# test where only one of the span indices changes
jsonfile.write(
json.dumps(
{
"text": "I told you already, Mr. Ford!",
"target": {
"span1_index": 4,
"span1_text": "Mr. Ford",
"span2_index": 0,
"span2_text": "I",
"label": False,
},
}
)
)
jsonfile.write("\n")
jsonfile.write(
json.dumps(
{
"text": "I look at Sarah's dog. It was cute.!",
"target": {
"span1_index": 3,
"span1_text": "Sarah's dog.",
"span2_index": 0,
"span2_text": "I",
"label": False,
},
}
)
)
def test_bert(self):
records = list(pd.read_json(self.path, lines=True).T.to_dict().values())
orig_records = copy.deepcopy(records)
for rec in records[:-1]:
retokenize.realign_spans(rec, "bert-large-cased")
retokenize.realign_spans(records[-1], "MosesTokenizer")
assert records[0]["text"] == orig_records[0]["text"]
# the two below should be changed by tokenization
assert records[1]["text"] != orig_records[1]["text"]
assert records[2]["text"] != orig_records[2]["text"]
result_span1 = records[0]["target"]["span1"]
result_span2 = records[0]["target"]["span2"]
assert result_span1 == [0, 1]
assert result_span2 == [5, 6]
result_span1 = records[1]["target"]["span1"]
result_span2 = records[1]["target"]["span2"]
assert result_span1 == [0, 3]
assert result_span2 == [10, 11]
result_span1 = records[2]["target"]["span1"]
result_span2 = records[2]["target"]["span2"]
assert result_span1 == [5, 9]
assert result_span2 == [0, 1]
result_span1 = records[3]["target"]["span1"]
result_span2 = records[3]["target"]["span2"]
assert result_span1 == [3, 7]
assert result_span2 == [0, 1]
def tearDown(self):
shutil.rmtree(self.temp_dir)
| 34.049587
| 80
| 0.433738
|
b56cbd67278be53c24d217ea331c9a69928358c0
| 2,749
|
py
|
Python
|
yodatools/dataloader/controller/WizardSummaryPageController.py
|
ODM2/YODAParser
|
274a1fc5ed1810bc748a4ab108855254f8b9fc46
|
[
"BSD-3-Clause"
] | null | null | null |
yodatools/dataloader/controller/WizardSummaryPageController.py
|
ODM2/YODAParser
|
274a1fc5ed1810bc748a4ab108855254f8b9fc46
|
[
"BSD-3-Clause"
] | 21
|
2016-02-06T00:43:44.000Z
|
2018-02-02T20:22:05.000Z
|
yodatools/dataloader/controller/WizardSummaryPageController.py
|
ODM2/ODM2YODAParser
|
274a1fc5ed1810bc748a4ab108855254f8b9fc46
|
[
"BSD-3-Clause"
] | 1
|
2017-07-06T18:42:22.000Z
|
2017-07-06T18:42:22.000Z
|
import os
import time
import wx
from pubsub import pub
from yodatools.converter.Inputs.excelInput import ExcelInput
from yodatools.converter.Inputs.yamlInput import yamlInput
from yodatools.converter.Outputs.yamlOutput import yamlOutput
from yodatools.converter.Outputs.dbOutput import dbOutput
from yodatools.dataloader.view.WizardSummaryPageView import WizardSummaryPageView
class WizardSummaryPageController(WizardSummaryPageView):
def __init__(self, parent, panel, title):
super(WizardSummaryPageController, self).__init__(panel)
self.parent = parent
self.title = title
def run(self, *args):
try:
self.__run(*args)
except Exception as e:
            pub.sendMessage('controller.error', message=str(e))
if os.getenv('DEBUG', 'false') == 'true':
raise
def __run(self, input_file, yoda_output_file_path=None, odm2_connection=None, sqlite_connection=None):
# Check if it is a yaml, or excel file
file_type = verify_file_type(input_file)
conn = next((conn for conn in [odm2_connection, sqlite_connection] if conn is not None), None)
if file_type == 'invalid': # Accept only excel and yaml files
print('File extension invalid or no file')
return
if file_type == 'excel':
excel = ExcelInput(conn=conn, gauge=self.gauge)
excel.parse(input_file)
session = excel.session
else:
# Must be a yoda file
yoda = yamlInput()
yoda.parse(input_file)
session = yoda.sendODM2Session()
print("Input complete")
# Go through each checkbox
if yoda_output_file_path is not None:
self.gauge.SetValue(50)
yaml = yamlOutput()
yaml.save(session=session, file_path=yoda_output_file_path)
print "Yoda Output Complete"
# if odm2_connection is not None:
# db = dbOutput()
# db.save(session=session, connection_string=odm2_connection)
# print "DB Output Complete"
#
# if sqlite_connection is not None:
# db = dbOutput()
# db.save(session=session, connection_string=sqlite_connection)
# print "SQLite Output Complete"
session.close_all()
self.gauge.SetValue(100)
self.parent.load_finished_execution()
return
def verify_file_type(input_file):
CONST_LEGAL_EXCEL_EXTENSIONS = ('xlsx', 'xlsm')
if input_file.endswith(CONST_LEGAL_EXCEL_EXTENSIONS):
file_type = 'excel'
elif input_file.endswith('yml'):
file_type = 'yaml'
else:
file_type = 'invalid'
return file_type
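# Illustrative examples only (file names are made up):
#   verify_file_type('readings.xlsx') -> 'excel'
#   verify_file_type('template.yml')  -> 'yaml'
#   verify_file_type('notes.txt')     -> 'invalid'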
| 30.208791
| 106
| 0.64387
|
efbe555d6b324a494ed9bb22ac1fb27e83c6950f
| 17,539
|
py
|
Python
|
siliconcompiler/sphinx_ext/dynamicgen.py
|
siliconcompiler/siliconcompiler
|
6aa2b53441608f228bd520b68c0324fc9cf96377
|
[
"Apache-2.0"
] | 424
|
2021-12-04T15:45:12.000Z
|
2022-03-31T20:27:55.000Z
|
siliconcompiler/sphinx_ext/dynamicgen.py
|
siliconcompiler/siliconcompiler
|
6aa2b53441608f228bd520b68c0324fc9cf96377
|
[
"Apache-2.0"
] | 105
|
2021-12-03T21:25:29.000Z
|
2022-03-31T22:36:59.000Z
|
siliconcompiler/sphinx_ext/dynamicgen.py
|
siliconcompiler/siliconcompiler
|
6aa2b53441608f228bd520b68c0324fc9cf96377
|
[
"Apache-2.0"
] | 38
|
2021-12-04T21:26:20.000Z
|
2022-03-21T02:39:29.000Z
|
'''Sphinx extension that provides directives for automatically generating
documentation for dynamically loaded modules used by SC.
'''
from docutils import nodes
from sphinx.util.nodes import nested_parse_with_titles
from docutils.statemachine import ViewList
from sphinx.util.docutils import SphinxDirective
import docutils
import importlib
import pkgutil
import os
import subprocess
import siliconcompiler
from siliconcompiler import utils
from siliconcompiler.sphinx_ext.utils import *
#############
# Helpers
#############
# We need this in a few places, so just make it global
SC_ROOT = os.path.abspath(f'{__file__}/../../../')
def build_schema_value_table(schema, keypath_prefix=[], skip_zero_weight=False):
'''Helper function for displaying values set in schema as a docutils table.'''
table = [[strong('Keypath'), strong('Value')]]
flat_cfg = flatten(schema)
for keys, val in flat_cfg.items():
full_keypath = list(keypath_prefix) + list(keys)
if (skip_zero_weight and
len(full_keypath) == 6 and full_keypath[0] == 'flowgraph' and full_keypath[-2] == 'weight' and
'value' in val and val['value'] == '0'):
continue
if 'value' in val and val['value']:
# Don't display false booleans
if val['type'] == 'bool' and val['value'] == 'false':
continue
if val['type'].startswith('['):
if len(val['value']) > 1:
val_node = build_list([code(v) for v in val['value']])
elif len(val['value']) > 0:
val_node = code(val['value'][0])
else:
val_node = para('')
else:
val_node = code(val['value'])
# HTML builder fails if we don't make a text node the parent of the
# reference node returned by keypath()
p = nodes.paragraph()
p += keypath(*full_keypath)
table.append([p, val_node])
if len(table) > 1:
# This colspec creates two columns of equal width that fill the entire
# page, and adds line breaks if table cell contents are longer than one
# line. "\X" is defined by Sphinx, otherwise this is standard LaTeX.
colspec = r'{|\X{1}{2}|\X{1}{2}|}'
return build_table(table, colspec=colspec)
else:
return None
def build_config_recursive(schema, keypath_prefix=[], sec_key_prefix=[]):
'''Helper function for displaying schema at each level as tables under nested
sections.
For each item:
- If it's a leaf, collect it into a table we will display at this
level
- Otherwise, recurse and collect sections of lower levels
'''
leaves = {}
child_sections = []
for key, val in schema.items():
if key == 'default': continue
if 'help' in val:
if 'value' in val and val['value']:
leaves.update({key: val})
else:
children = build_config_recursive(val, keypath_prefix=keypath_prefix+[key], sec_key_prefix=sec_key_prefix)
child_sections.extend(children)
# If we've found leaves, create a new section where we'll display a
# table plus all child sections.
if len(leaves) > 0:
keypath = ', '.join(keypath_prefix)
section_key = '-'.join(sec_key_prefix + keypath_prefix)
top = build_section(keypath, section_key)
top += build_schema_value_table(leaves, keypath_prefix=keypath_prefix)
top += child_sections
return [top]
else:
# Otherwise, just pass on the child sections -- we don't want to
# create an extra level of section hierarchy for levels of the
# schema without leaves.
return child_sections
#############
# Base class
#############
def flag_opt(argument):
if argument is not None:
raise ValueError('Flag should not have content')
return True
class DynamicGen(SphinxDirective):
'''Base class for all three directives provided by this extension.
Each child class implements a directive by overriding the display_config()
method and setting a PATH member variable.
'''
option_spec = {'nobuiltins': flag_opt}
def document_module(self, module, modname, path):
'''Build section documenting given module and name.'''
print(f'Generating docs for module {modname}...')
s = build_section_with_target(modname, f'{modname}-ref', self.state.document)
if not hasattr(module, 'make_docs'):
return None
make_docs = getattr(module, 'make_docs')
# raw docstrings have funky indentation (basically, each line is already
# indented as much as the function), so we call trim() helper function
# to clean it up
docstr = utils.trim(make_docs.__doc__)
if docstr:
self.parse_rst(docstr, s)
builtin = os.path.abspath(path).startswith(SC_ROOT)
if builtin:
relpath = path[len(SC_ROOT)+1:]
gh_root = 'https://github.com/siliconcompiler/siliconcompiler/blob/main'
gh_link = f'{gh_root}/{relpath}'
filename = os.path.basename(relpath)
p = para('Setup file: ')
p += link(gh_link, text=filename)
s += p
chip = make_docs()
extra_content = self.extra_content(chip, modname)
if extra_content is not None:
s += extra_content
s += self.display_config(chip, modname)
return s
def run(self):
'''Main entry point of directive.'''
sections = []
for module, modname in self.get_modules():
path = module.__file__
self.env.note_dependency(path)
docs = self.document_module(module, modname, path)
if docs is not None:
sections.append((docs, modname))
if len(sections) > 0:
# Sort sections by module name
sections = sorted(sections, key=lambda t: t[1])
# Strip off modname so we just return list of docutils sections
sections, _ = zip(*sections)
return list(sections)
def get_modules(self):
'''Gets dynamic modules under `self.PATH`.
This function explicitly searches builtins as well as SCPATH
directories. Although the directory for builtin tools gets added to
SCPATH after a chip object has been initialized, we can't rely on this
since we can't be sure that's happened yet. Therefore, we have to check
each one explicitly.
However, this could result in duplicate modules being detected once the
SCPATH does get updated. Therefore, we check to ensure that SCPATH
directories are not equal to the builtins directory before searching it.
TODO: we want better duplicate resolution (in case the user explicitly
declares a duplicate tool), where SCPATH takes priority.
'''
builtins_dir = f'{SC_ROOT}/siliconcompiler/{self.PATH}'
if 'nobuiltins' not in self.options:
modules = self.get_modules_in_dir(builtins_dir)
else:
modules = []
if 'SCPATH' in os.environ:
scpaths = os.environ['SCPATH'].split(':')
for scpath in scpaths:
user_dir = f'{scpath}/{self.PATH}'
if not os.path.isdir(user_dir) or builtins_dir == user_dir:
continue
modules.extend(self.get_modules_in_dir(user_dir))
return modules
def get_modules_in_dir(self, module_dir):
'''Routine for getting modules and their names from a certain
directory.'''
modules = []
for importer, modname, _ in pkgutil.iter_modules([module_dir]):
            if modname in ('sc_floorplan',):
continue
module = importer.find_module(modname).load_module(modname)
modules.append((module, modname))
return modules
def parse_rst(self, content, s):
'''Helper for parsing reStructuredText content, adding it directly to
section `s`.'''
rst = ViewList()
        # use fake filename 'inline' for error reporting
for i, line in enumerate(content.split('\n')):
rst.append(line, 'inline', i)
nested_parse_with_titles(self.state, rst, s)
def extra_content(self, chip, modname):
'''Adds extra content to documentation.
May return a list of docutils nodes that will be added to the
documentation in between a module's docstring and configuration table.
Otherwise, if return value is None, don't add anything.
'''
return None
#########################
# Specialized extensions
#########################
class FlowGen(DynamicGen):
PATH = 'flows'
def extra_content(self, chip, modname):
flow_path = os.path.join(self.env.app.outdir, f'_images/gen/{modname}.svg')
#chip.write_flowgraph(flow_path, fillcolor='#1c4587', fontcolor='#f1c232', border=False)
chip.write_flowgraph(flow_path)
return [image(flow_path, center=True)]
def display_config(self, chip, modname):
        '''Display parameters under `flowgraph, <step>` and `option, showtool`.
        Parameters are grouped into sections by step, with an additional table
        for non-step items.
        '''
section_key = '-'.join(['flows', modname, 'configuration'])
settings = build_section('Configuration', section_key)
steps = chip.getkeys('flowgraph')
# TODO: should try to order?
        # Build a section + table for each step (entries under `flowgraph`)
for step in steps:
section_key = '-'.join(['flows', modname, step])
section = build_section(step, section_key)
step_cfg = {}
for prefix in ['flowgraph']:
cfg = chip.getdict(prefix, step)
if cfg is None:
continue
pruned = chip._prune(cfg)
if prefix not in step_cfg:
step_cfg[prefix] = {}
step_cfg[prefix][step] = pruned
section += build_schema_value_table(step_cfg, skip_zero_weight=True)
settings += section
# Build table for non-step items (just showtool for now)
section_key = '-'.join(['flows', modname, 'option', 'showtool'])
section = build_section('showtool', section_key)
cfg = chip.getdict('option', 'showtool')
pruned = chip._prune(cfg)
table = build_schema_value_table(pruned, keypath_prefix=['option', 'showtool'])
if table is not None:
section += table
settings += section
return settings
class PDKGen(DynamicGen):
PATH = 'pdks'
def display_config(self, chip, modname):
        '''Display parameters under `pdk` in nested form.'''
section_key = '-'.join(['pdks', modname, 'configuration'])
settings = build_section('Configuration', section_key)
cfg = chip.getdict('pdk')
settings += build_config_recursive(cfg, keypath_prefix=['pdk'], sec_key_prefix=['pdks', modname])
return settings
class LibGen(DynamicGen):
PATH = 'libs'
def extra_content(self, chip, modname):
# assume same pdk for all libraries configured by this module
mainlib = chip.getkeys('library')[0]
pdk = chip.get('library', mainlib, 'asic', 'pdk')
p = docutils.nodes.inline('')
self.parse_rst(f'Associated PDK: :ref:`{pdk}<{pdk}-ref>`', p)
return [p]
def display_config(self, chip, modname):
        '''Display parameters under `asic` and `model` in nested form, one section per library.'''
sections = []
for libname in chip.getkeys('library'):
section_key = '-'.join(['libs', modname, libname, 'configuration'])
settings = build_section(libname, section_key)
for key in ('asic', 'model'):
cfg = chip.getdict('library', libname, key)
settings += build_config_recursive(cfg, keypath_prefix=[key], sec_key_prefix=['libs', modname, libname, key])
sections.append(settings)
return sections
class ToolGen(DynamicGen):
PATH = 'tools'
def display_config(self, chip, modname):
        '''Display config under `tool, <modname>` in a single table.'''
cfg = chip.getdict('tool', modname)
pruned = chip._prune(cfg)
table = build_schema_value_table(pruned, keypath_prefix=['tool', modname])
if table is not None:
return table
else:
return []
def get_modules_in_dir(self, module_dir):
'''Custom implementation for ToolGen since the tool setup modules are
under an extra directory, and this way we don't have to force users to
add an __init__.py to make the directory a module itself.
'''
modules = []
for toolname in os.listdir(module_dir):
# skip over directories/files that don't match the structure of tool
# directories (otherwise we'll get confused by Python metadata like
# __init__.py or __pycache__/)
if not os.path.isdir(f'{module_dir}/{toolname}'):
continue
path = f'{module_dir}/{toolname}/{toolname}.py'
if not os.path.exists(path):
continue
spec = importlib.util.spec_from_file_location(toolname, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
modules.append((module, toolname))
return modules
class TargetGen(DynamicGen):
PATH = 'targets'
def build_module_list(self, chip, header, modtype, targetname):
modules = chip._loaded_modules[modtype]
if len(modules) > 0:
section = build_section(header, f'{targetname}-{modtype}')
modlist = nodes.bullet_list()
for module in modules:
list_item = nodes.list_item()
# TODO: replace with proper docutils nodes: sphinx.addnodes.pending_xref
modkey = nodes.make_id(module)
self.parse_rst(f':ref:`{module}<{modkey}-ref>`', list_item)
modlist += list_item
section += modlist
return section
return None
def display_config(self, chip, modname):
sections = []
flow_section = self.build_module_list(chip, 'Flows', 'flows', modname)
if flow_section is not None:
sections.append(flow_section)
pdk_section = self.build_module_list(chip, 'PDK', 'pdks', modname)
if pdk_section is not None:
sections.append(pdk_section)
libs_section = self.build_module_list(chip, 'Libraries', 'libs', modname)
if libs_section is not None:
sections.append(libs_section)
filtered_cfg = {}
for key in ('asic', 'constraint', 'option'):
filtered_cfg[key] = chip.getdict(key)
pruned_cfg = chip._prune(filtered_cfg)
if len(pruned_cfg) > 0:
schema_section = build_section('Configuration', key=f'{modname}-config')
schema_section += build_schema_value_table(pruned_cfg)
sections.append(schema_section)
return sections
class AppGen(DynamicGen):
PATH = 'apps'
def document_module(self, module, modname, path):
# TODO: Auto-documentation does not work with apps that use 'input(...)'
        if modname in ('sc_configure',):
return
cmd_name = modname.replace('_', '-')
cmd = [cmd_name, '--help']
output = subprocess.check_output(cmd).decode('utf-8')
section = build_section(cmd_name, cmd_name)
section += literalblock(output)
return section
class ExampleGen(DynamicGen):
def get_modules(self):
examples_dir = f'{SC_ROOT}/examples'
modules = []
for example in os.listdir(examples_dir):
if not os.path.isdir(f'{examples_dir}/{example}'):
continue
path = f'{examples_dir}/{example}/{example}.py'
if not os.path.exists(path):
continue
spec = importlib.util.spec_from_file_location(example, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
modules.append((module, example))
return modules
def document_module(self, module, modname, path):
section = build_section(modname, modname)
if not hasattr(module, 'main'):
return None
main = getattr(module, 'main')
# raw docstrings have funky indentation (basically, each line is already
# indented as much as the function), so we call trim() helper function
# to clean it up
docstr = utils.trim(main.__doc__)
if docstr:
self.parse_rst(docstr, section)
return section
def setup(app):
app.add_directive('flowgen', FlowGen)
app.add_directive('pdkgen', PDKGen)
app.add_directive('libgen', LibGen)
app.add_directive('toolgen', ToolGen)
app.add_directive('appgen', AppGen)
app.add_directive('examplegen', ExampleGen)
app.add_directive('targetgen', TargetGen)
return {
'version': siliconcompiler.__version__,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
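# Usage sketch (assumptions: this file is importable as 'dynamicgen' from the Sphinx
# project's configuration directory; the directive names come from setup() above).
#
#   # conf.py
#   extensions = ['dynamicgen']
#
#   # any .rst page
#   .. flowgen::
#
#   .. toolgen::
#      :nobuiltins:
#
# Each directive expands into one documentation section per module discovered by the
# corresponding DynamicGen subclass.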
| avg_line_length: 35.576065 | max_line_length: 125 | alphanum_fraction: 0.611608 |
| hexsha: 39b6f45b01e2bdfd59d3064c3805dc932e82e1d3 | size: 7,542 | ext: py | lang: Python |
| max_stars: doc/conf.py | heavywatal/pysam | 482e887e67ce035e8a9e8fe1ec55be6c80d3e5dc | ["MIT"] | count: 553 | 2015-01-02T15:04:03.000Z | 2022-03-31T18:14:11.000Z |
| max_issues: doc/conf.py | heavywatal/pysam | 482e887e67ce035e8a9e8fe1ec55be6c80d3e5dc | ["MIT"] | count: 940 | 2015-01-05T05:12:07.000Z | 2022-03-30T17:09:19.000Z |
| max_forks: doc/conf.py | heavywatal/pysam | 482e887e67ce035e8a9e8fe1ec55be6c80d3e5dc | ["MIT"] | count: 287 | 2015-01-04T22:46:07.000Z | 2022-03-23T20:37:25.000Z |
# -*- coding: utf-8 -*-
#
# samtools documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 12 14:43:42 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, sysconfig
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
_pyversion = sysconfig.get_python_version()
_libdir = "../build/lib.%s-%s" % (sysconfig.get_platform(), _pyversion)
if os.path.exists(_libdir):
sys.path.insert(0, os.path.abspath(_libdir))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon']
intersphinx_mapping = {'python': ('https://docs.python.org/%s' % _pyversion, None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysam'
copyright = u'2009–2021, Andreas Heger, Kevin Jacobs, et al'
# Included at the end of each rst file
rst_epilog = '''
.. _CGAT Training Programme: http://www.cgat.org
.. _pysam: https://github.com/pysam-developers/pysam
.. _samtools: http://samtools.sourceforge.net/
.. _bcftools: https://samtools.github.io/bcftools/bcftools.html
.. _htslib: http://www.htslib.org/
.. _tabix: http://samtools.sourceforge.net/tabix.shtml
.. _Galaxy: https://main.g2.bx.psu.edu/
.. _cython: http://cython.org/
.. _python: http://python.org/
.. _pypi: https://pypi.org/
.. _pip: https://pip.pypa.io/
.. _pyximport: http://www.prescod.net/pyximport/
.. _conda: https://conda.io/docs/
.. _bioconda: https://bioconda.github.io/
'''
autosummary_generate = True
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import pysam.version
# The short X.Y version.
version = pysam.version.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'samtoolsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pysam.tex', u'pysam documentation',
u'Andreas Heger, Kevin Jacobs, et al.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
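# Build sketch (assumptions: Sphinx is installed and this conf.py lives in doc/):
#
#   sphinx-build -b html doc doc/_build/html
#
# sphinx-build picks up this configuration automatically because conf.py sits in the
# source directory given on the command line.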
| avg_line_length: 33.371681 | max_line_length: 83 | alphanum_fraction: 0.713073 |
| hexsha: 54509ce70f7f7000be44c003e1dcb13dc5f8820f | size: 32,969 | ext: py | lang: Python |
| max_stars: torchgeo/datasets/geo.py | RitwikGupta/torchgeo | 14c19e35c2b17f9cd6f2dcbdc0968283aa89fbbb | ["MIT"] | count: 1 | 2022-01-07T01:42:32.000Z | 2022-01-07T01:42:32.000Z |
| max_issues: torchgeo/datasets/geo.py | RitwikGupta/torchgeo | 14c19e35c2b17f9cd6f2dcbdc0968283aa89fbbb | ["MIT"] | count: null | null | null |
| max_forks: torchgeo/datasets/geo.py | RitwikGupta/torchgeo | 14c19e35c2b17f9cd6f2dcbdc0968283aa89fbbb | ["MIT"] | count: null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Base classes for all :mod:`torchgeo` datasets."""
import abc
import functools
import glob
import os
import re
import sys
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast
import fiona
import fiona.transform
import matplotlib.pyplot as plt
import numpy as np
import pyproj
import rasterio
import rasterio.features
import rasterio.merge
import rasterio.transform
import shapely.geometry
import shapely.ops
import torch
from rasterio.crs import CRS
from rasterio.io import DatasetReader
from rasterio.vrt import WarpedVRT
from rasterio.windows import from_bounds
from rtree.index import Index, Property
from torch import Tensor
from torch.utils.data import Dataset
from torchvision.datasets import ImageFolder
from torchvision.datasets.folder import default_loader as pil_loader
from .utils import BoundingBox, concat_samples, disambiguate_timestamp, merge_samples
# https://github.com/pytorch/pytorch/issues/60979
# https://github.com/pytorch/pytorch/pull/61045
Dataset.__module__ = "torch.utils.data"
ImageFolder.__module__ = "torchvision.datasets"
class GeoDataset(Dataset[Dict[str, Any]], abc.ABC):
"""Abstract base class for datasets containing geospatial information.
Geospatial information includes things like:
* coordinates (latitude, longitude)
* :term:`coordinate reference system (CRS)`
* resolution
:class:`GeoDataset` is a special class of datasets. Unlike :class:`VisionDataset`,
the presence of geospatial information allows two or more datasets to be combined
based on latitude/longitude. This allows users to do things like:
* Combine image and target labels and sample from both simultaneously
(e.g. Landsat and CDL)
* Combine datasets for multiple image sources for multimodal learning or data fusion
(e.g. Landsat and Sentinel)
These combinations require that all queries are present in *both* datasets,
and can be combined using an :class:`IntersectionDataset`:
.. code-block:: python
dataset = landsat & cdl
Users may also want to:
* Combine datasets for multiple image sources and treat them as equivalent
(e.g. Landsat 7 and Landsat 8)
* Combine datasets for disparate geospatial locations
(e.g. Chesapeake NY and PA)
These combinations require that all queries are present in *at least one* dataset,
and can be combined using a :class:`UnionDataset`:
.. code-block:: python
dataset = landsat7 | landsat8
"""
#: Resolution of the dataset in units of CRS.
res: float
_crs: CRS
# NOTE: according to the Python docs:
#
# * https://docs.python.org/3/library/exceptions.html#NotImplementedError
#
# the correct way to handle __add__ not being supported is to set it to None,
# not to return NotImplemented or raise NotImplementedError. The downside of
# this is that we have no way to explain to a user why they get an error and
# what they should do instead (use __and__ or __or__).
#: :class:`GeoDataset` addition can be ambiguous and is no longer supported.
#: Users should instead use the intersection or union operator.
__add__ = None # type: ignore[assignment]
def __init__(
self, transforms: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None
) -> None:
"""Initialize a new Dataset instance.
Args:
transforms: a function/transform that takes an input sample
and returns a transformed version
"""
self.transforms = transforms
# Create an R-tree to index the dataset
self.index = Index(interleaved=False, properties=Property(dimension=3))
@abc.abstractmethod
def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:
"""Retrieve image/mask and metadata indexed by query.
Args:
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
sample of image/mask and metadata at that index
Raises:
IndexError: if query is not found in the index
"""
def __and__(self, other: "GeoDataset") -> "IntersectionDataset":
"""Take the intersection of two :class:`GeoDataset`.
Args:
other: another dataset
Returns:
a single dataset
Raises:
ValueError: if other is not a :class:`GeoDataset`
"""
return IntersectionDataset(self, other)
def __or__(self, other: "GeoDataset") -> "UnionDataset":
"""Take the union of two GeoDatasets.
Args:
other: another dataset
Returns:
a single dataset
Raises:
ValueError: if other is not a :class:`GeoDataset`
"""
return UnionDataset(self, other)
def __len__(self) -> int:
"""Return the number of files in the dataset.
Returns:
length of the dataset
"""
count: int = self.index.count(self.index.bounds)
return count
def __str__(self) -> str:
"""Return the informal string representation of the object.
Returns:
informal string representation
"""
return f"""\
{self.__class__.__name__} Dataset
type: GeoDataset
bbox: {self.bounds}
size: {len(self)}"""
@property
def bounds(self) -> BoundingBox:
"""Bounds of the index.
Returns:
(minx, maxx, miny, maxy, mint, maxt) of the dataset
"""
return BoundingBox(*self.index.bounds)
@property
def crs(self) -> CRS:
""":term:`coordinate reference system (CRS)` for the dataset.
Returns:
the :term:`coordinate reference system (CRS)`
"""
return self._crs
@crs.setter
def crs(self, new_crs: CRS) -> None:
"""Change the :term:`coordinate reference system (CRS)` of a GeoDataset.
If ``new_crs == self.crs``, does nothing, otherwise updates the R-tree index.
Args:
new_crs: new :term:`coordinate reference system (CRS)`
"""
if new_crs == self._crs:
return
new_index = Index(interleaved=False, properties=Property(dimension=3))
project = pyproj.Transformer.from_crs(
pyproj.CRS(str(self._crs)), pyproj.CRS(str(new_crs)), always_xy=True
).transform
for hit in self.index.intersection(self.index.bounds, objects=True):
old_minx, old_maxx, old_miny, old_maxy, mint, maxt = hit.bounds
old_box = shapely.geometry.box(old_minx, old_miny, old_maxx, old_maxy)
new_box = shapely.ops.transform(project, old_box)
new_minx, new_miny, new_maxx, new_maxy = new_box.bounds
new_bounds = (new_minx, new_maxx, new_miny, new_maxy, mint, maxt)
new_index.insert(hit.id, new_bounds, hit.object)
self._crs = new_crs
self.index = new_index
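# Minimal standalone sketch (assumes only the rtree package) of the 3-dimensional
# R-tree built above: with interleaved=False the bounds are ordered
# (minx, maxx, miny, maxy, mint, maxt), matching BoundingBox, so spatial extent and
# time range are queried together.
def _rtree_sketch() -> None:
    from rtree.index import Index, Property

    idx = Index(interleaved=False, properties=Property(dimension=3))
    idx.insert(0, (0.0, 1.0, 0.0, 1.0, 0.0, 10.0), obj="scene_a.tif")
    hits = idx.intersection((0.2, 0.3, 0.2, 0.3, 5.0, 6.0), objects=True)
    print([hit.object for hit in hits])  # -> ['scene_a.tif']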
class RasterDataset(GeoDataset):
"""Abstract base class for :class:`GeoDataset` stored as raster files."""
#: Glob expression used to search for files.
#:
#: This expression should be specific enough that it will not pick up files from
#: other datasets. It should not include a file extension, as the dataset may be in
#: a different file format than what it was originally downloaded as.
filename_glob = "*"
#: Regular expression used to extract date from filename.
#:
#: The expression should use named groups. The expression may contain any number of
#: groups. The following groups are specifically searched for by the base class:
#:
#: * ``date``: used to calculate ``mint`` and ``maxt`` for ``index`` insertion
#:
    #: When :attr:`separate_files` is True, the following additional groups are
#: searched for to find other files:
#:
#: * ``band``: replaced with requested band name
#: * ``resolution``: replaced with a glob character
filename_regex = ".*"
#: Date format string used to parse date from filename.
#:
#: Not used if :attr:`filename_regex` does not contain a ``date`` group.
date_format = "%Y%m%d"
#: True if dataset contains imagery, False if dataset contains mask
is_image = True
#: True if data is stored in a separate file for each band, else False.
separate_files = False
#: Names of all available bands in the dataset
all_bands: List[str] = []
#: Names of RGB bands in the dataset, used for plotting
rgb_bands: List[str] = []
#: If True, stretch the image from the 2nd percentile to the 98th percentile,
#: used for plotting
stretch = False
#: Color map for the dataset, used for plotting
cmap: Dict[int, Tuple[int, int, int, int]] = {}
def __init__(
self,
root: str,
crs: Optional[CRS] = None,
res: Optional[float] = None,
transforms: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
cache: bool = True,
) -> None:
"""Initialize a new Dataset instance.
Args:
root: root directory where dataset can be found
crs: :term:`coordinate reference system (CRS)` to warp to
(defaults to the CRS of the first file found)
res: resolution of the dataset in units of CRS
(defaults to the resolution of the first file found)
transforms: a function/transform that takes an input sample
and returns a transformed version
cache: if True, cache file handle to speed up repeated sampling
Raises:
FileNotFoundError: if no files are found in ``root``
"""
super().__init__(transforms)
self.root = root
self.cache = cache
# Populate the dataset index
i = 0
pathname = os.path.join(root, "**", self.filename_glob)
filename_regex = re.compile(self.filename_regex, re.VERBOSE)
for filepath in glob.iglob(pathname, recursive=True):
match = re.match(filename_regex, os.path.basename(filepath))
if match is not None:
try:
with rasterio.open(filepath) as src:
# See if file has a color map
try:
self.cmap = src.colormap(1)
except ValueError:
pass
if crs is None:
crs = src.crs
if res is None:
res = src.res[0]
with WarpedVRT(src, crs=crs) as vrt:
minx, miny, maxx, maxy = vrt.bounds
except rasterio.errors.RasterioIOError:
# Skip files that rasterio is unable to read
continue
else:
mint: float = 0
maxt: float = sys.maxsize
if "date" in match.groupdict():
date = match.group("date")
mint, maxt = disambiguate_timestamp(date, self.date_format)
coords = (minx, maxx, miny, maxy, mint, maxt)
self.index.insert(i, coords, filepath)
i += 1
if i == 0:
raise FileNotFoundError(
f"No {self.__class__.__name__} data was found in '{root}'"
)
self._crs = cast(CRS, crs)
self.res = cast(float, res)
def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:
"""Retrieve image/mask and metadata indexed by query.
Args:
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
sample of image/mask and metadata at that index
Raises:
IndexError: if query is not found in the index
"""
hits = self.index.intersection(tuple(query), objects=True)
filepaths = [hit.object for hit in hits]
if not filepaths:
raise IndexError(
f"query: {query} not found in index with bounds: {self.bounds}"
)
if self.separate_files:
data_list: List[Tensor] = []
filename_regex = re.compile(self.filename_regex, re.VERBOSE)
for band in getattr(self, "bands", self.all_bands):
band_filepaths = []
for filepath in filepaths:
filename = os.path.basename(filepath)
directory = os.path.dirname(filepath)
match = re.match(filename_regex, filename)
if match:
if "date" in match.groupdict():
start = match.start("band")
end = match.end("band")
filename = filename[:start] + band + filename[end:]
if "resolution" in match.groupdict():
start = match.start("resolution")
end = match.end("resolution")
filename = filename[:start] + "*" + filename[end:]
filepath = glob.glob(os.path.join(directory, filename))[0]
band_filepaths.append(filepath)
data_list.append(self._merge_files(band_filepaths, query))
data = torch.cat(data_list) # type: ignore[attr-defined]
else:
data = self._merge_files(filepaths, query)
key = "image" if self.is_image else "mask"
sample = {key: data, "crs": self.crs, "bbox": query}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
def _merge_files(self, filepaths: Sequence[str], query: BoundingBox) -> Tensor:
"""Load and merge one or more files.
Args:
filepaths: one or more files to load and merge
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
image/mask at that index
"""
if self.cache:
vrt_fhs = [self._cached_load_warp_file(fp) for fp in filepaths]
else:
vrt_fhs = [self._load_warp_file(fp) for fp in filepaths]
bounds = (query.minx, query.miny, query.maxx, query.maxy)
if len(vrt_fhs) == 1:
src = vrt_fhs[0]
out_width = int(round((query.maxx - query.minx) / self.res))
out_height = int(round((query.maxy - query.miny) / self.res))
out_shape = (src.count, out_height, out_width)
dest = src.read(
out_shape=out_shape, window=from_bounds(*bounds, src.transform)
)
else:
dest, _ = rasterio.merge.merge(vrt_fhs, bounds, self.res)
dest = dest.astype(np.int32)
tensor: Tensor = torch.tensor(dest) # type: ignore[attr-defined]
return tensor
@functools.lru_cache(maxsize=128)
def _cached_load_warp_file(self, filepath: str) -> DatasetReader:
"""Cached version of :meth:`_load_warp_file`.
Args:
filepath: file to load and warp
Returns:
file handle of warped VRT
"""
return self._load_warp_file(filepath)
def _load_warp_file(self, filepath: str) -> DatasetReader:
"""Load and warp a file to the correct CRS and resolution.
Args:
filepath: file to load and warp
Returns:
file handle of warped VRT
"""
src = rasterio.open(filepath)
# Only warp if necessary
if src.crs != self.crs:
vrt = WarpedVRT(src, crs=self.crs)
src.close()
return vrt
else:
return src
def plot(self, data: Tensor) -> None:
"""Plot a data sample.
Args:
data: the data to plot
Raises:
AssertionError: if ``is_image`` is True and ``data`` has a different number
of channels than expected
"""
array = data.squeeze().numpy()
if self.is_image:
bands = getattr(self, "bands", self.all_bands)
assert array.shape[0] == len(bands)
# Only plot RGB bands
if bands and self.rgb_bands:
indices = np.array([bands.index(band) for band in self.rgb_bands])
array = array[indices]
# Convert from CxHxW to HxWxC
array = np.rollaxis(array, 0, 3)
if self.cmap:
# Convert from class labels to RGBA values
cmap = np.array([self.cmap[i] for i in range(len(self.cmap))])
array = cmap[array]
if self.stretch:
# Stretch to the range of 2nd to 98th percentile
per02 = np.percentile(array, 2) # type: ignore[no-untyped-call]
per98 = np.percentile(array, 98) # type: ignore[no-untyped-call]
array = (array - per02) / (per98 - per02)
array = np.clip(array, 0, 1)
# Plot the data
ax = plt.axes()
ax.imshow(array)
ax.axis("off")
plt.show()
plt.close()
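# Illustrative subclass sketch (hypothetical dataset, not part of torchgeo): a concrete
# raster dataset usually only needs to override the class attributes documented above.
class ExampleScenes(RasterDataset):
    """Hypothetical scenes named like ``scene_20210101_B04_10m.tif``."""

    filename_glob = "scene_*"
    filename_regex = r"scene_(?P<date>\d{8})_(?P<band>B\d{2})_(?P<resolution>\d+m)"
    date_format = "%Y%m%d"
    is_image = True
    separate_files = True
    all_bands = ["B02", "B03", "B04"]
    rgb_bands = ["B04", "B03", "B02"]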
class VectorDataset(GeoDataset):
"""Abstract base class for :class:`GeoDataset` stored as vector files."""
#: Glob expression used to search for files.
#:
#: This expression should be specific enough that it will not pick up files from
#: other datasets. It should not include a file extension, as the dataset may be in
#: a different file format than what it was originally downloaded as.
filename_glob = "*"
def __init__(
self,
root: str = "data",
crs: Optional[CRS] = None,
res: float = 0.0001,
transforms: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
) -> None:
"""Initialize a new Dataset instance.
Args:
root: root directory where dataset can be found
crs: :term:`coordinate reference system (CRS)` to warp to
(defaults to the CRS of the first file found)
res: resolution of the dataset in units of CRS
transforms: a function/transform that takes input sample and its target as
entry and returns a transformed version
Raises:
FileNotFoundError: if no files are found in ``root``
"""
super().__init__(transforms)
self.root = root
self.res = res
# Populate the dataset index
i = 0
pathname = os.path.join(root, "**", self.filename_glob)
for filepath in glob.iglob(pathname, recursive=True):
try:
with fiona.open(filepath) as src:
if crs is None:
crs = CRS.from_dict(src.crs)
minx, miny, maxx, maxy = src.bounds
(minx, maxx), (miny, maxy) = fiona.transform.transform(
src.crs, crs.to_dict(), [minx, maxx], [miny, maxy]
)
except fiona.errors.FionaValueError:
# Skip files that fiona is unable to read
continue
else:
mint = 0
maxt = sys.maxsize
coords = (minx, maxx, miny, maxy, mint, maxt)
self.index.insert(i, coords, filepath)
i += 1
if i == 0:
raise FileNotFoundError(
f"No {self.__class__.__name__} data was found in '{root}'"
)
self._crs = crs
def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:
"""Retrieve image/mask and metadata indexed by query.
Args:
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
sample of image/mask and metadata at that index
Raises:
IndexError: if query is not found in the index
"""
hits = self.index.intersection(tuple(query), objects=True)
filepaths = [hit.object for hit in hits]
if not filepaths:
raise IndexError(
f"query: {query} not found in index with bounds: {self.bounds}"
)
shapes = []
for filepath in filepaths:
with fiona.open(filepath) as src:
# We need to know the bounding box of the query in the source CRS
(minx, maxx), (miny, maxy) = fiona.transform.transform(
self.crs.to_dict(),
src.crs,
[query.minx, query.maxx],
[query.miny, query.maxy],
)
# Filter geometries to those that intersect with the bounding box
for feature in src.filter(bbox=(minx, miny, maxx, maxy)):
# Warp geometries to requested CRS
shape = fiona.transform.transform_geom(
src.crs, self.crs.to_dict(), feature["geometry"]
)
shapes.append(shape)
# Rasterize geometries
width = (query.maxx - query.minx) / self.res
height = (query.maxy - query.miny) / self.res
transform = rasterio.transform.from_bounds(
query.minx, query.miny, query.maxx, query.maxy, width, height
)
masks = rasterio.features.rasterize(
shapes, out_shape=(int(height), int(width)), transform=transform
)
sample = {
"mask": torch.tensor(masks), # type: ignore[attr-defined]
"crs": self.crs,
"bbox": query,
}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
def plot(self, data: Tensor) -> None:
"""Plot a data sample.
Args:
data: the data to plot
"""
array = data.squeeze().numpy()
# Plot the image
ax = plt.axes()
ax.imshow(array)
ax.axis("off")
plt.show()
plt.close()
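# Illustrative subclass sketch (hypothetical data): a concrete vector dataset only
# needs a sufficiently specific filename_glob; geometries are rasterized on the fly
# at resolution ``res`` when indexed with a BoundingBox.
class ExampleBoundaries(VectorDataset):
    """Hypothetical administrative-boundary vector files."""

    filename_glob = "boundaries_*"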
class VisionDataset(Dataset[Dict[str, Any]], abc.ABC):
"""Abstract base class for datasets lacking geospatial information.
This base class is designed for datasets with pre-defined image chips.
"""
@abc.abstractmethod
def __getitem__(self, index: int) -> Dict[str, Any]:
"""Return an index within the dataset.
Args:
index: index to return
Returns:
data and labels at that index
Raises:
IndexError: if index is out of range of the dataset
"""
@abc.abstractmethod
def __len__(self) -> int:
"""Return the length of the dataset.
Returns:
length of the dataset
"""
def __str__(self) -> str:
"""Return the informal string representation of the object.
Returns:
informal string representation
"""
return f"""\
{self.__class__.__name__} Dataset
type: VisionDataset
size: {len(self)}"""
class VisionClassificationDataset(VisionDataset, ImageFolder): # type: ignore[misc]
"""Abstract base class for classification datasets lacking geospatial information.
This base class is designed for datasets with pre-defined image chips which
are separated into separate folders per class.
"""
def __init__(
self,
root: str,
transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,
loader: Optional[Callable[[str], Any]] = pil_loader,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> None:
"""Initialize a new VisionClassificationDataset instance.
Args:
root: root directory where dataset can be found
transforms: a function/transform that takes input sample and its target as
entry and returns a transformed version
loader: a callable function which takes as input a path to an image and
returns a PIL Image or numpy array
is_valid_file: A function that takes the path of an Image file and checks if
the file is a valid file
"""
# When transform & target_transform are None, ImageFolder.__getitem__(index)
# returns a PIL.Image and int for image and label, respectively
super().__init__(
root=root,
transform=None,
target_transform=None,
loader=loader,
is_valid_file=is_valid_file,
)
# Must be set after calling super().__init__()
self.transforms = transforms
def __getitem__(self, index: int) -> Dict[str, Tensor]:
"""Return an index within the dataset.
Args:
index: index to return
Returns:
data and label at that index
"""
image, label = self._load_image(index)
sample = {"image": image, "label": label}
if self.transforms is not None:
sample = self.transforms(sample)
return sample
def __len__(self) -> int:
"""Return the number of data points in the dataset.
Returns:
length of the dataset
"""
return len(self.imgs)
def _load_image(self, index: int) -> Tuple[Tensor, Tensor]:
"""Load a single image and it's class label.
Args:
index: index to return
Returns:
the image
the image class label
"""
img, label = ImageFolder.__getitem__(self, index)
array = np.array(img)
tensor: Tensor = torch.from_numpy(array) # type: ignore[attr-defined]
# Convert from HxWxC to CxHxW
tensor = tensor.permute((2, 0, 1))
label = torch.tensor(label) # type: ignore[attr-defined]
return tensor, label
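# Usage sketch (hypothetical directory layout): VisionClassificationDataset reuses
# torchvision's ImageFolder scanning, so root/<class_name>/<image>.png is all that is
# required on disk.
def _vision_classification_sketch(root: str) -> None:
    ds = VisionClassificationDataset(root=root)
    sample = ds[0]  # {"image": CxHxW uint8 tensor, "label": scalar tensor}
    print(len(ds), sample["image"].shape, sample["label"])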
class IntersectionDataset(GeoDataset):
"""Dataset representing the intersection of two GeoDatasets.
This allows users to do things like:
* Combine image and target labels and sample from both simultaneously
(e.g. Landsat and CDL)
* Combine datasets for multiple image sources for multimodal learning or data fusion
(e.g. Landsat and Sentinel)
These combinations require that all queries are present in *both* datasets,
and can be combined using an :class:`IntersectionDataset`:
.. code-block:: python
dataset = landsat & cdl
"""
def __init__(
self,
dataset1: GeoDataset,
dataset2: GeoDataset,
collate_fn: Callable[
[Sequence[Dict[str, Any]]], Dict[str, Any]
] = concat_samples,
) -> None:
"""Initialize a new Dataset instance.
Args:
dataset1: the first dataset
dataset2: the second dataset
collate_fn: function used to collate samples
Raises:
ValueError: if either dataset is not a :class:`GeoDataset`
"""
super().__init__()
self.datasets = [dataset1, dataset2]
self.collate_fn = collate_fn
for ds in self.datasets:
if not isinstance(ds, GeoDataset):
raise ValueError("IntersectionDataset only supports GeoDatasets")
self._crs = dataset1.crs
self.res = dataset1.res
# Force dataset2 to have the same CRS/res as dataset1
if dataset1.crs != dataset2.crs:
print(
f"Converting {dataset2.__class__.__name__} CRS from "
f"{dataset2.crs} to {dataset1.crs}"
)
dataset2.crs = dataset1.crs
if dataset1.res != dataset2.res:
print(
f"Converting {dataset2.__class__.__name__} resolution from "
f"{dataset2.res} to {dataset1.res}"
)
dataset2.res = dataset1.res
# Merge dataset indices into a single index
self._merge_dataset_indices()
def _merge_dataset_indices(self) -> None:
"""Create a new R-tree out of the individual indices from two datasets."""
i = 0
ds1, ds2 = self.datasets
for hit1 in ds1.index.intersection(ds1.index.bounds, objects=True):
for hit2 in ds2.index.intersection(hit1.bounds, objects=True):
box1 = BoundingBox(*hit1.bounds)
box2 = BoundingBox(*hit2.bounds)
self.index.insert(i, tuple(box1 & box2))
i += 1
def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:
"""Retrieve image and metadata indexed by query.
Args:
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
sample of data/labels and metadata at that index
Raises:
IndexError: if query is not within bounds of the index
"""
if not query.intersects(self.bounds):
raise IndexError(
f"query: {query} not found in index with bounds: {self.bounds}"
)
# All datasets are guaranteed to have a valid query
samples = [ds[query] for ds in self.datasets]
return self.collate_fn(samples)
def __str__(self) -> str:
"""Return the informal string representation of the object.
Returns:
informal string representation
"""
return f"""\
{self.__class__.__name__} Dataset
type: IntersectionDataset
bbox: {self.bounds}
size: {len(self)}"""
class UnionDataset(GeoDataset):
"""Dataset representing the union of two GeoDatasets.
This allows users to do things like:
* Combine datasets for multiple image sources and treat them as equivalent
(e.g. Landsat 7 and Landsat 8)
* Combine datasets for disparate geospatial locations
(e.g. Chesapeake NY and PA)
These combinations require that all queries are present in *at least one* dataset,
and can be combined using a :class:`UnionDataset`:
.. code-block:: python
dataset = landsat7 | landsat8
"""
def __init__(
self,
dataset1: GeoDataset,
dataset2: GeoDataset,
collate_fn: Callable[
[Sequence[Dict[str, Any]]], Dict[str, Any]
] = merge_samples,
) -> None:
"""Initialize a new Dataset instance.
Args:
dataset1: the first dataset
dataset2: the second dataset
collate_fn: function used to collate samples
Raises:
ValueError: if either dataset is not a :class:`GeoDataset`
"""
super().__init__()
self.datasets = [dataset1, dataset2]
self.collate_fn = collate_fn
for ds in self.datasets:
if not isinstance(ds, GeoDataset):
raise ValueError("UnionDataset only supports GeoDatasets")
self._crs = dataset1.crs
self.res = dataset1.res
# Force dataset2 to have the same CRS/res as dataset1
if dataset1.crs != dataset2.crs:
print(
f"Converting {dataset2.__class__.__name__} CRS from "
f"{dataset2.crs} to {dataset1.crs}"
)
dataset2.crs = dataset1.crs
if dataset1.res != dataset2.res:
print(
f"Converting {dataset2.__class__.__name__} resolution from "
f"{dataset2.res} to {dataset1.res}"
)
dataset2.res = dataset1.res
# Merge dataset indices into a single index
self._merge_dataset_indices()
def _merge_dataset_indices(self) -> None:
"""Create a new R-tree out of the individual indices from two datasets."""
i = 0
for ds in self.datasets:
hits = ds.index.intersection(ds.index.bounds, objects=True)
for hit in hits:
self.index.insert(i, hit.bounds)
i += 1
def __getitem__(self, query: BoundingBox) -> Dict[str, Any]:
"""Retrieve image and metadata indexed by query.
Args:
query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
Returns:
sample of data/labels and metadata at that index
Raises:
IndexError: if query is not within bounds of the index
"""
if not query.intersects(self.bounds):
raise IndexError(
f"query: {query} not found in index with bounds: {self.bounds}"
)
# Not all datasets are guaranteed to have a valid query
samples = []
for ds in self.datasets:
if ds.index.intersection(tuple(query)):
samples.append(ds[query])
return self.collate_fn(samples)
def __str__(self) -> str:
"""Return the informal string representation of the object.
Returns:
informal string representation
"""
return f"""\
{self.__class__.__name__} Dataset
type: UnionDataset
bbox: {self.bounds}
size: {len(self)}"""
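# Usage sketch tying the operators above together (``imagery`` and ``labels`` stand in
# for any two concrete GeoDatasets, e.g. Landsat and CDL; the sample keys depend on the
# chosen collate_fn).
def _composition_sketch(imagery: GeoDataset, labels: GeoDataset) -> None:
    combined = imagery & labels  # IntersectionDataset: a query must hit both datasets
    either = imagery | labels    # UnionDataset: a query must hit at least one dataset
    sample = combined[combined.bounds]  # e.g. keys "image", "mask", "crs", "bbox"
    print(sorted(sample.keys()), either.bounds)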
| avg_line_length: 33.607543 | max_line_length: 88 | alphanum_fraction: 0.586187 |
| hexsha: 5f588fafa9a52f8cd14bda675f3e912187a27c51 | size: 4,664 | ext: py | lang: Python |
| max_stars: uplink/hooks.py | sthagen/prkumar-uplink | 089335833acf60cf9b206278580d856814f4067d | ["MIT"] | count: null | null | null |
| max_issues: uplink/hooks.py | sthagen/prkumar-uplink | 089335833acf60cf9b206278580d856814f4067d | ["MIT"] | count: null | null | null |
| max_forks: uplink/hooks.py | sthagen/prkumar-uplink | 089335833acf60cf9b206278580d856814f4067d | ["MIT"] | count: null | null | null |
"""
This module provides a class for defining custom handling for specific
points of an HTTP transaction.
"""
# Local imports
from uplink import compat
__all__ = ["TransactionHook", "RequestAuditor", "ResponseHandler"]
def _wrap_if_necessary(hook, requires_consumer):
if not requires_consumer:
return _wrap_to_ignore_consumer(hook)
return hook
def _wrap_to_ignore_consumer(hook):
@compat.wraps(hook)
def wrapper(_, *args, **kwargs):
# Expects that consumer is the first argument
return hook(*args, **kwargs)
return wrapper
class TransactionHook(object):
"""
A utility class providing methods that define hooks for specific
points of an HTTP transaction.
"""
def audit_request(self, consumer, request_builder): # pragma: no cover
"""Inspects details of a request before it is sent."""
pass
handle_response = None
"""
Handles a response object from the server.
This method can be undefined (i.e., None), indicating that this hook
does not handle responses.
Args:
response: The received HTTP response.
"""
def handle_exception(
self, consumer, exc_type, exc_val, exc_tb
): # pragma: no cover
"""
Handles an exception thrown while waiting for a response from
the server.
Args:
consumer: The consumer that spawned the failing request.
exc_type: The type of the exception.
exc_val: The exception instance raised.
exc_tb: A traceback instance.
"""
pass
class TransactionHookChain(TransactionHook):
"""
A chain that conjoins several transaction hooks into a single
object.
A method call on this composite object invokes the corresponding
method on all hooks in the chain.
"""
def __init__(self, *hooks):
self._hooks = hooks
self._response_handlers = []
# TODO: If more than one callback exists on the chain, the chain
# expects it can execute each synchronously. Instead, we should
        # be smart about this and produce a chained coroutine when all
# callbacks are coroutines, so that the client can execute the
# chain asynchronously. Further, when provided both synchronous
# and asynchronous callbacks, we should raise an exception when
# the order is mixed and split into two chains (one async and
# the other sync) when the order permits separation.
# Adding a synchronous callback to an async request forces the
# request to execute synchronously while running this chain. To
# avoid unnecessarily executing this chain when no callbacks
        # exist, we can set the `handle_response` method to null,
# indicating that this hook doesn't handle responses.
response_handlers = [h for h in hooks if h.handle_response is not None]
if not response_handlers:
self.handle_response = None
elif len(response_handlers) == 1:
self.handle_response = response_handlers[0].handle_response
self._response_handlers = response_handlers
def audit_request(self, consumer, request_handler):
for hook in self._hooks:
hook.audit_request(consumer, request_handler)
def handle_response(self, consumer, response):
for hook in self._response_handlers:
response = hook.handle_response(consumer, response)
return response
def handle_exception(self, consumer, exc_type, exc_val, exc_tb):
for hook in self._hooks:
hook.handle_exception(consumer, exc_type, exc_val, exc_tb)
compat.reraise(exc_type, exc_val, exc_tb)
class RequestAuditor(TransactionHook):
"""
Transaction hook that inspects requests using a function provided at
time of instantiation.
"""
def __init__(self, auditor, requires_consumer=False):
self.audit_request = _wrap_if_necessary(auditor, requires_consumer)
class ResponseHandler(TransactionHook):
"""
Transaction hook that handles responses using a function provided at
time of instantiation.
"""
def __init__(self, handler, requires_consumer=False):
self.handle_response = _wrap_if_necessary(handler, requires_consumer)
class ExceptionHandler(TransactionHook):
"""
Transaction hook that handles an exception thrown while waiting for
a response, using the provided function.
"""
def __init__(self, exception_handler, requires_consumer=False):
self.handle_exception = _wrap_if_necessary(
exception_handler, requires_consumer
)
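# Usage sketch (assumes the rest of the uplink package is importable): plain functions
# are adapted into hooks; because requires_consumer defaults to False they are wrapped
# so the consumer argument passed by the chain is ignored.
def _print_status(response):
    print(response.status_code)
    return response

_audit_hook = RequestAuditor(lambda request_builder: print(request_builder))
_response_hook = ResponseHandler(_print_status)
_example_chain = TransactionHookChain(_audit_hook, _response_hook)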
| avg_line_length: 32.615385 | max_line_length: 79 | alphanum_fraction: 0.687393 |
| hexsha: f02b79d4db48bf40af5b1d64ef0e22db3fabafc8 | size: 7,483 | ext: py | lang: Python |
| max_stars: plugins/modules/oci_marketplace_agreement_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | ["Apache-2.0"] | count: 108 | 2020-05-19T20:46:10.000Z | 2022-03-25T14:10:01.000Z |
| max_issues: plugins/modules/oci_marketplace_agreement_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | ["Apache-2.0"] | count: 90 | 2020-06-14T22:07:11.000Z | 2022-03-07T05:40:29.000Z |
| max_forks: plugins/modules/oci_marketplace_agreement_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | ["Apache-2.0"] | count: 42 | 2020-08-30T23:09:12.000Z | 2022-03-25T16:58:01.000Z |
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_marketplace_agreement_facts
short_description: Fetches details about one or multiple Agreement resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple Agreement resources in Oracle Cloud Infrastructure
- Returns the terms of use agreements that must be accepted before you can deploy the specified version of a package.
- If I(agreement_id) is specified, the details of a single Agreement will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
listing_id:
description:
- The unique identifier for the listing.
type: str
required: true
package_version:
description:
- The version of the package. Package versions are unique within a listing.
type: str
required: true
agreement_id:
description:
- The unique identifier for the agreement.
- Required to get a specific agreement.
type: str
aliases: ["id"]
compartment_id:
description:
- The unique identifier for the compartment.
type: str
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific agreement
oci_marketplace_agreement_facts:
# required
listing_id: "ocid1.listing.oc1..xxxxxxEXAMPLExxxxxx"
package_version: package_version_example
agreement_id: "ocid1.agreement.oc1..xxxxxxEXAMPLExxxxxx"
# optional
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
- name: List agreements
oci_marketplace_agreement_facts:
# required
listing_id: "ocid1.listing.oc1..xxxxxxEXAMPLExxxxxx"
package_version: package_version_example
# optional
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
agreements:
description:
- List of Agreement resources
returned: on success
type: complex
contains:
id:
description:
- The unique identifier for the agreement.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
content_url:
description:
- The content URL of the agreement.
returned: on success
type: str
sample: content_url_example
signature:
description:
- A time-based signature that can be used to accept an agreement or remove a
previously accepted agreement from the list that Marketplace checks before a deployment.
- Returned for get operation
returned: on success
type: str
sample: signature_example
compartment_id:
description:
- The unique identifier for the compartment.
- Returned for get operation
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
author:
description:
- Who authored the agreement.
returned: on success
type: str
sample: ORACLE
prompt:
description:
- Textual prompt to read and accept the agreement.
returned: on success
type: str
sample: prompt_example
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"content_url": "content_url_example",
"signature": "signature_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"author": "ORACLE",
"prompt": "prompt_example"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.marketplace import MarketplaceClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AgreementFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"listing_id",
"package_version",
"agreement_id",
]
def get_required_params_for_list(self):
return [
"listing_id",
"package_version",
]
def get_resource(self):
optional_get_method_params = [
"compartment_id",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_get_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.call_with_backoff(
self.client.get_agreement,
listing_id=self.module.params.get("listing_id"),
package_version=self.module.params.get("package_version"),
agreement_id=self.module.params.get("agreement_id"),
**optional_kwargs
)
def list_resources(self):
optional_list_method_params = [
"compartment_id",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_agreements,
listing_id=self.module.params.get("listing_id"),
package_version=self.module.params.get("package_version"),
**optional_kwargs
)
AgreementFactsHelperCustom = get_custom_class("AgreementFactsHelperCustom")
class ResourceFactsHelper(AgreementFactsHelperCustom, AgreementFactsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
listing_id=dict(type="str", required=True),
package_version=dict(type="str", required=True),
agreement_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="agreement",
service_client_class=MarketplaceClient,
namespace="marketplace",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(agreements=result)
if __name__ == "__main__":
main()
| avg_line_length: 31.049793 | max_line_length: 121 | alphanum_fraction: 0.648403 |
| hexsha: 1ebc4107db903302aaffd24e454dc300550934f0 | size: 888 | ext: py | lang: Python |
| max_stars: caliper-workspace/Performance/Read_latency.py | weishancc/SPChain | 1a10d2253b7cead862092df9f740ae768124df39 | ["Apache-2.0"] | count: 1 | 2021-12-26T07:41:11.000Z | 2021-12-26T07:41:11.000Z |
| max_issues: caliper-workspace/Performance/Read_latency.py | weishancc/SPChain | 1a10d2253b7cead862092df9f740ae768124df39 | ["Apache-2.0"] | count: null | null | null |
| max_forks: caliper-workspace/Performance/Read_latency.py | weishancc/SPChain | 1a10d2253b7cead862092df9f740ae768124df39 | ["Apache-2.0"] | count: 1 | 2021-07-20T04:25:03.000Z | 2021-07-20T04:25:03.000Z |
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 15:27:10 2021
@author: Koma
"""
# Plotted read functions include:
# - readConsent
# - readArtwork
# - getHistoryForArtwork
# - readLog
# - readModel
# - readBalance
# - invokeModel
import matplotlib.pyplot as plt
import util
if __name__ == "__main__":
filterFunc = ['initialConsent', 'grantRevokeConsent', 'uploadArtwork', 'transferArtwork', 'addLog', 'addModel', 'addWallet', 'transferBlalance']
data = util.concat_result(filterFunc, 6) # 6 for latency
fig, ax = plt.subplots(figsize=(10, 6))
util.plot_latency(ax, data)
plt.xticks(range(7), ["10", "20", "30", "40", "50", "60", "70"])
plt.xlabel('txDuration (sec)')
plt.ylabel('Latency (sec)')
    plt.title('Read latency of different functions under different transaction durations')
plt.show()
| avg_line_length: 28.645161 | max_line_length: 149 | alphanum_fraction: 0.626126 |
| hexsha: c6d22c4d2abce021e524d16d012bc4030e580b0d | size: 242 | ext: py | lang: Python |
| max_stars: corelib/core/__init__.py | guillaume-florent/corelib | 9e25b862b5eaf1fe1a9b68af070a743184f992ba | ["MIT"] | count: 1 | 2018-05-05T02:11:59.000Z | 2018-05-05T02:11:59.000Z |
| max_issues: corelib/core/__init__.py | guillaume-florent/corelib | 9e25b862b5eaf1fe1a9b68af070a743184f992ba | ["MIT"] | count: null | null | null |
| max_forks: corelib/core/__init__.py | guillaume-florent/corelib | 9e25b862b5eaf1fe1a9b68af070a743184f992ba | ["MIT"] | count: null | null | null |
# -*- coding: utf-8 -*-
r"""__init__ module of the core package"""
from __future__ import unicode_literals
# API
from corelib.core.profiling import timeit
from corelib.core.memoize import memoize
from corelib.core.timeout import timeout
| avg_line_length: 18.615385 | max_line_length: 42 | alphanum_fraction: 0.768595 |
| hexsha: b48695d9c5281ea3834309a324bcda348ab27867 | size: 3,038 | ext: py | lang: Python |
| max_stars: deephyper/benchmark/datasets/airlines.py | Z223I/deephyper | 4fd1054dc22f15197567bdd93c6e7a95a614b8e2 | ["BSD-3-Clause"] | count: 185 | 2018-11-06T18:49:47.000Z | 2022-03-31T22:10:41.000Z |
| max_issues: deephyper/benchmark/datasets/airlines.py | Z223I/deephyper | 4fd1054dc22f15197567bdd93c6e7a95a614b8e2 | ["BSD-3-Clause"] | count: 108 | 2018-12-17T17:58:05.000Z | 2022-03-16T10:22:08.000Z |
| max_forks: deephyper/benchmark/datasets/airlines.py | Z223I/deephyper | 4fd1054dc22f15197567bdd93c6e7a95a614b8e2 | ["BSD-3-Clause"] | count: 50 | 2018-12-11T20:41:41.000Z | 2022-02-25T19:50:47.000Z |
import numpy as np
import openml
from sklearn.preprocessing import LabelEncoder
from sklearn import model_selection
def load_data(
random_state=42,
verbose=False,
test_size=0.33,
valid_size=0.33,
categoricals_to_integers=False,
):
"""Load the "Airlines" dataset from OpenML.
Args:
random_state (int, optional): A numpy `RandomState`. Defaults to 42.
        verbose (bool, optional): Print information about the dataset. Defaults to False.
test_size (float, optional): The proportion of the test dataset out of the whole data. Defaults to 0.33.
valid_size (float, optional): The proportion of the train dataset out of the whole data without the test data. Defaults to 0.33.
categoricals_to_integers (bool, optional): Convert categoricals features to integer values. Defaults to False.
Returns:
tuple: Numpy arrays as `(X_train, y_train), (X_valid, y_valid), (X_test, y_test), categorical_indicator`.
"""
random_state = (
np.random.RandomState(random_state) if type(random_state) is int else random_state
)
dataset = openml.datasets.get_dataset(1169)
if verbose:
print(
f"This is dataset '{dataset.name}', the target feature is "
f"'{dataset.default_target_attribute}'"
)
print(f"URL: {dataset.url}")
print(dataset.description[:500])
X, y, categorical_indicator, ft_names = dataset.get_data(
target=dataset.default_target_attribute
)
# encode categoricals as integers
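# Note: when categoricals_to_integers is True, each entry of categorical_indicator is also
# replaced below by a (was_categorical, n_classes) tuple, with n_classes == -1 for
# non-categorical features.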
if categoricals_to_integers:
for (ft_ind, ft_name) in enumerate(ft_names):
if categorical_indicator[ft_ind]:
labenc = LabelEncoder().fit(X[ft_name])
X[ft_name] = labenc.transform(X[ft_name])
n_classes = len(labenc.classes_)
else:
n_classes = -1
categorical_indicator[ft_ind] = (
categorical_indicator[ft_ind],
n_classes,
)
X, y = X.to_numpy(), y.to_numpy()
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=test_size, shuffle=True, random_state=random_state
)
# relative valid_size on Train set
r_valid_size = valid_size / (1.0 - test_size)
X_train, X_valid, y_train, y_valid = model_selection.train_test_split(
X_train, y_train, test_size=r_valid_size, shuffle=True, random_state=random_state
)
return (X_train, y_train), (X_valid, y_valid), (X_test, y_test), categorical_indicator
def test_load_data_airlines():
from deephyper.benchmark.datasets import airlines
import numpy as np
names = ["train", "valid", "test "]
data = airlines.load_data(random_state=42, verbose=True)[:-1]
for (X, y), subset_name in zip(data, names):
print(
f"X_{subset_name} shape: ",
np.shape(X),
f", y_{subset_name} shape: ",
np.shape(y),
)
if __name__ == "__main__":
test_load_data_airlines()
| 33.755556
| 136
| 0.64944
|
75cd7c8b4980332ce8bdbd0e3118348d5996e303
| 154,846
|
py
|
Python
|
python/tvm/relay/frontend/pytorch.py
|
pfk-beta/tvm
|
5ecb8c384a66933fec8c7f033cba03337eb1a726
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/frontend/pytorch.py
|
pfk-beta/tvm
|
5ecb8c384a66933fec8c7f033cba03337eb1a726
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/frontend/pytorch.py
|
pfk-beta/tvm
|
5ecb8c384a66933fec8c7f033cba03337eb1a726
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, too-many-lines, len-as-condition, no-else-return, unused-variable, too-many-nested-blocks
# pylint: disable=consider-iterating-dictionary, invalid-name, unused-argument, unused-variable, broad-except
# pylint: disable=import-outside-toplevel, simplifiable-if-expression, cell-var-from-loop, unnecessary-lambda
# pylint: disable=missing-function-docstring, redefined-builtin
"""PT: PyTorch frontend."""
import functools
import itertools
import math
import sys
import logging
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from .. import analysis as _analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import qnn, transform
from ..expr_functor import ExprMutator
from ..loops import while_loop
from ..prelude import Prelude, StaticTensorArrayOps
from ..ty import Any, TensorType, TupleType
from . import qnn_torch
from .common import AttrCvt, get_relay_op, gru_cell, logger
from .common import infer_shape as _infer_shape
from .common import infer_value as _infer_value
from .common import infer_value_simulated as _infer_value_simulated
from .common import lstm_cell, try_infer_value, unbind
from .pytorch_utils import is_version_greater_than, getattr_attr_name
__all__ = ["from_pytorch"]
# This returns a "subgraph" which puts variables whenever
# the type is known. It also records things to map the input
# nodes to the extracted graph's nodes.
# As Python objects are not round-trippable through C++, and
# our type annotations only live in Python, we need to map
# nodes we get in visiting to the nodes
# we used to construct the graph (they are the same in C++,
# match each other in dictionary lookups, but are not the same
# in Python) by using the hint dictionary filled as
# {node: node for node in nodes} to get the type annotations.
# https://discuss.tvm.apache.org/t/round-tripping-objects-through-the-ffi/8440
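# In short (descriptive summary of the code below): infer_type() wraps the node of interest in a
# Function whose already-typed sub-expressions are replaced by fresh typed vars via _TypeFinder,
# so running InferType on that small module only has to type the new node.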
class _TypeFinder(ExprMutator):
def __init__(self, types):
super().__init__()
self.counter = 0
self.vars = {}
self.types = types
self.leave = set() # some variables are not inputs
def visit_let(self, let):
self.leave.add(let.var)
return super().visit_let(let)
def visit_function(self, fn):
self.leave.update(fn.params)
return super().visit_function(fn)
def visit(self, expr):
if expr in self.leave:
return super().visit(expr)
if expr in self.vars:
return self.vars[expr]
if isinstance(expr, tvm.relay.Var):
self.vars[expr] = expr
return expr
if expr in self.types:
ty = self.types[expr]
v = tvm.relay.var(f"_{self.counter}", type_annotation=ty)
self.counter += 1
self.vars[expr] = v
return v
v = super().visit(expr)
return v
def _should_construct_dynamic_list(list_construct_node):
# if this list is element-accessed or modified at runtime, generate List ADT
def inplace_add_to_add(op_name):
if op_name == "aten::add_":
return "aten::add"
else:
return op_name
uses = _get_uses(list_construct_node)
for loop_use in filter(lambda use: use.user.kind() == "prim::Loop", uses):
block_input_index = loop_use.offset - 1
block = list(loop_use.user.blocks())[0]
list_loop_var = list(block.inputs())[block_input_index]
uses += _get_uses(list_loop_var.node())
op_names = map(inplace_add_to_add, set(use.user.kind() for use in uses))
list_ops = set(["aten::add", "aten::__getitem__"])
intersect = list_ops.intersection(op_names)
if len(intersect) > 0 and intersect != set(["aten::add"]):
return True
# if add op outputs list, it is dynamic so we need to construct List ADT
for use in filter(lambda use: use.user.kind() in ["aten::add", "aten::add_"], uses):
output_type = _get_node_type(use.user)
if output_type == "ListType":
return True
return False
def _is_int_seq(seq):
# TODO (t-vi): handle non-int constants? (like numpy.intXX)
return len(seq) > 0 and all([isinstance(i, int) for i in seq])
# operator implementation
class PyTorchOpConverter:
"""A helper class for holding PyTorch op converters."""
def __init__(self, prelude, default_dtype):
self.prelude = prelude
self.default_dtype = default_dtype
self.create_convert_map()
self.types = {} # map from nodes to (Relay) type annotations
# this incrementally infers the type, see the comments on the type visitor
# above.
def infer_type(self, node, mod=None):
"""An incremental method to infer the type of a node in the relay graph."""
if node in self.types:
return self.types[node]
if isinstance(node, tvm.relay.Var):
return node.type_annotation
tf = _TypeFinder(types=self.types)
new_node = tf.visit(node)
fn = _function.Function(list(tf.vars.values()), new_node)
new_mod = IRModule({"main": fn})
if mod is not None:
new_mod.update(mod)
new_mod = transform.RemoveUnusedFunctions()(new_mod)
new_mod = transform.InferType()(new_mod)
entry = new_mod["main"]
ty = entry.body.checked_type
self.types[node] = ty
return self.types[node]
def infer_type_with_prelude(self, val):
body = self.infer_type(val, self.prelude.mod)
return body
# list ADT utilities
def convert_to_list_adt(self, py_lst):
elem_tys = [self.infer_type_with_prelude(elem) for elem in py_lst]
msg = "List elements should have identical types"
assert all(map(lambda ty: ty == elem_tys[0], elem_tys)), msg
# get_type returns type_name, ctor1, ..., ctorN
# 1 is nil
_, cons, nil = self.prelude.mod.get_type("List")
adt_lst = nil()
for elem in reversed(py_lst):
adt_lst = cons(elem, adt_lst)
return adt_lst
def map_tensor_array_constructor(self, adt_lst, shape):
static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", shape)
static_tensor_array_ops.register()
tensor_create = self.prelude.get_tensor_ctor_static("tensor_constructor", "float32", shape)
return self.prelude.map(tensor_create, adt_lst)
def convert_to_tensor_array(self, adt_lst):
_, cons, nil = self.prelude.mod.get_type("List")
if self.prelude.length(adt_lst) == 0:
return nil()
checked_type = self.infer_type_with_prelude(self.prelude.hd(adt_lst))
shape = checked_type.shape
tensor_array = self.map_tensor_array_constructor(adt_lst, shape)
return tensor_array, tuple(shape)
def infer_shape(self, inputs, mod=None):
"""A method to get the output type of an intermediate node in the graph."""
typ = self.infer_type(inputs, mod=mod)
if hasattr(typ, "shape"):
# Regular operator that outputs tensors
return get_const_tuple(typ.shape)
# The return type is not a tensor, for example List
return typ
def infer_shape_with_prelude(self, inputs):
return self.infer_shape(inputs, mod=self.prelude.mod)
def record_output_type(self, output):
if isinstance(output, tuple):
cleaned_output = [o for o in output if o is not None]
types = self.infer_type_with_prelude(_expr.Tuple(cleaned_output))
for o, t in zip(cleaned_output, types.fields):
self.types[o] = t
elif isinstance(output, _expr.Expr):
self.infer_type_with_prelude(output)
# it can also happen that the type is int or so
def pytorch_promote_types(self, inputs, dtypes):
"""This promotes TVM inputs with TVM dtypes passed like PyTorch would"""
actual_dtypes = []
for i, inp in enumerate(inputs):
if isinstance(inp, _expr.Expr):
idt = self.infer_type(inp).dtype
actual_dtypes.append(idt)
else:
actual_dtypes.append(dtypes[i])
dtypes = actual_dtypes
tensor_dtypes = [dt for inp, dt in zip(inputs, dtypes) if not np.isscalar(inp)]
non_tensor_inputs = [inp for inp in inputs if np.isscalar(inp)]
result_type = _pytorch_result_type(tensor_dtypes, non_tensor_inputs)
results = []
for inp, dt in zip(inputs, dtypes):
if np.isscalar(inp):
results.append(_expr.const(inp, dtype=result_type))
elif dt == result_type:
results.append(inp)
else:
results.append(_op.cast(inp, result_type))
return results
def is_quantized_tensor(self, data):
# If a quantized Torch module is saved and loaded back, dtype will be dropped
# Since dtypes from Torch tensors are not reliable in such cases, we use
# Relay's type inference result to decide if an input tensor is quantized
ty = self.infer_type_with_prelude(data)
return ty.dtype == "uint8"
# Operator implementations
def make_elemwise(self, name):
def elemwise(inputs, input_types):
if name == "divide":
# https://pytorch.org/docs/stable/generated/torch.div.html#torch.div
# None - default behavior. Performs no rounding and, if both input and
# other are integer types, promotes the inputs to the default scalar type.
if all(["int" in input_type for input_type in input_types[:2]]):
input_types[:2] = ["float32"] * 2
cast_inputs = []
for inp in inputs[:2]:
if np.isscalar(inp):
cast_inputs.append(_expr.const(inp, dtype="float32"))
else:
cast_inputs.append(_op.cast(inp, "float32"))
inputs[:2] = cast_inputs
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
return get_relay_op(name)(data0, data1)
return elemwise
def min_max_common(self, name_elemwise, name_reduce, inputs, input_types):
if len(inputs) == 1:
data = self.pytorch_promote_types(inputs[:1], input_types[:1])
return get_relay_op(name_reduce)(data[0])
elif len(inputs) >= 2 and isinstance(inputs[1], (list, int)):
data = self.pytorch_promote_types(inputs[:1], input_types[:1])
dim = inputs[1]
keepdims = inputs[2] if len(inputs) > 2 else False
# also return dummy indices
return get_relay_op(name_reduce)(data[0], axis=dim, keepdims=keepdims), None
else:
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
return get_relay_op(name_elemwise)(data0, data1)
def max(self, inputs, input_types):
return self.min_max_common("maximum", "max", inputs, input_types)
def min(self, inputs, input_types):
return self.min_max_common("minimum", "min", inputs, input_types)
def make_unary(self, name):
def unary(inputs, input_types):
# this is just to ensure tensor input
(data,) = self.pytorch_promote_types(inputs[:1], input_types[:1])
return get_relay_op(name)(data)
return unary
def log1p(self, inputs, input_types):
# 1_plus_log x = log(x + 1)
(dtype,) = input_types
one = _expr.const(1, dtype=dtype)
return _op.log(inputs[0] + one)
def square(self, inputs, input_types):
(dtype,) = input_types
return _op.power(inputs[0], _expr.const(2, dtype))
def arange(self, inputs, input_types):
def _get_value(val, dtype):
# dtype is a tvm dtype
if isinstance(val, _expr.Expr):
inp = _op.cast(val, dtype)
ret, _ = try_infer_value(inp, lambda ret: _expr.const(ret, dtype))
else:
ret = _create_typed_const(val, dtype)
return ret
def _get_type(val, inp_type):
if isinstance(val, _expr.Expr):
dtype = str(self.infer_type(val))
return dtype
return inp_type
# PyTorch arange uses the following type semantics:
# - if a dtype is given, start, stop, step are converted to that dtype
# - if no dtype is given and all args are integral, dtype is int64
# - if no dtype is given and there is a float arg, dtype is float32
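# e.g. (illustrative) aten::arange(5) produces int64 output, while aten::arange(2.5) or any
# call with a float argument and no explicit dtype produces float32.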
if len(inputs) == 5:
dtype0 = _get_type(inputs[0], input_types[0])
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
elif dtype0.startswith("float"):
dtype = "float32"
else:
dtype = "int64"
start = _expr.const(0, dtype)
stop = _get_value(inputs[0], dtype)
step = _expr.const(1, dtype)
elif len(inputs) == 7:
types = [_get_type(inputs[i], input_types[i]) for i in range(3)]
if inputs[3] is not None:
dtype = _convert_dtype_value(inputs[3])
elif any([t.startswith("float") for t in types]):
dtype = "float32"
else:
dtype = "int64"
start = _get_value(inputs[0], dtype)
stop = _get_value(inputs[1], dtype)
step = _get_value(inputs[2], dtype)
else:
msg = "Unknown number of arguments (%d) to parse." % (len(inputs))
raise AssertionError(msg)
return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)
def squeeze(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 1:
axis = None
else:
# TODO (t-vi): why is the cast to int needed? similarly elsewhere
axis = [int(inputs[1])]
return _op.transform.squeeze(data, axis)
def unsqueeze(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
return _op.transform.expand_dims(data, int(axis), 1)
def concatenate(self, inputs, input_types):
def tensor_array_concat(lst, axis):
assert axis == 0, "Tensor array concat supported only for axis 0"
tensor_array, shape = self.convert_to_tensor_array(lst)
concat_shape = (Any(),) + shape[1:]
concat = self.prelude.get_global_var_static("tensor_array_concat", "float32", shape)
concatenated = concat(tensor_array)
static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", concat_shape)
static_tensor_array_ops.register()
get_tensor = self.prelude.get_global_var_static(
"tensor_get_data", "float32", concat_shape
)
return get_tensor(concatenated)
data = inputs[0]
axis = inputs[1]
if not isinstance(data, list):
return tensor_array_concat(data, axis)
if isinstance(data, _expr.Expr):
data = [data]
return _op.tensor.concatenate(data, int(axis))
def slice(self, inputs, input_types):
axis_dtype = "int64"
index_size_limit = sys.maxsize
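# sys.maxsize is used as a sentinel: TorchScript represents open-ended slices such as x[1:]
# with a huge end value, so an end >= index_size_limit is treated below as "slice to the end".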
data = inputs[0]
dshape = self.infer_shape(data)
ndim = len(dshape)
dim = int(inputs[1])
stride = inputs[4]
target_begin, is_begin_const = try_infer_value(
inputs[2], lambda ret: ret.astype(np.int64).item(0)
)
target_end, is_end_const = try_infer_value(
inputs[3], lambda ret: ret.astype(np.int64).item(0)
)
# A fast path when slicing is nop.
if (
isinstance(target_begin, int)
and isinstance(target_end, int)
and target_begin == 0
and target_end >= index_size_limit
and stride == 1
):
return data
if target_begin is None and target_end is None:
return data
# Process begin
begin = [0] * ndim
if target_begin is not None:
begin[dim] = target_begin
if target_begin is not None and not isinstance(begin[dim], int):
tmp = []
for b in begin:
if isinstance(b, int):
tmp.append(_op.expand_dims(_expr.const(b, axis_dtype), axis=0))
else:
tmp.append(_op.cast(_op.expand_dims(b, axis=0), axis_dtype))
begin = _op.concatenate(tmp, axis=0)
btype = self.infer_type(begin).dtype
if str(btype) != axis_dtype:
begin = _op.cast(begin, axis_dtype)
# Process end
if isinstance(target_end, int) and target_end >= index_size_limit:
target_end = dshape[dim]
if any([isinstance(d, tvm.tir.Any) for d in dshape]):
end = _op.shape_of(data)
else:
end = dshape
if isinstance(target_end, int):
if isinstance(end, list):
end[dim] = target_end
else:
all_static = True
for i, shape_dim in enumerate(dshape):
if i != dim and isinstance(shape_dim, tvm.tir.Any):
all_static = False
if all_static:
end = list(get_const_tuple(dshape))
end[dim] = target_end
else:
target_end = _expr.const(target_end)
end = _op.scatter(
end,
_op.expand_dims(_expr.const(dim), axis=0),
_op.expand_dims(target_end, axis=0),
axis=0,
)
else:
end = _op.cast(_op.shape_of(data), axis_dtype)
if target_end is not None and not isinstance(target_end, tvm.tir.Any):
ttype = self.infer_type(target_end).dtype
if str(ttype) != axis_dtype:
target_end = _op.cast(target_end, axis_dtype)
end = _op.scatter(
end,
_op.expand_dims(_expr.const(dim), axis=0),
_op.expand_dims(target_end, axis=0),
axis=0,
)
if not isinstance(end, list):
etype = self.infer_type(end).dtype
if str(etype) != axis_dtype:
end = _op.cast(end, axis_dtype)
strides = [1] * ndim
strides[dim] = stride
return _op.transform.strided_slice(
data, begin=begin, end=end, strides=strides, slice_mode="end"
)
def narrow(self, inputs, input_types):
# Inputs are:
# 0 - the tensor to narrow
# 1 - the dimension along which to narrow
# 2 - the starting dimension
# 3 - the distance to the ending dimension
# Lets find the ending dimension
end = self.add(inputs[2:4], input_types[2:4])
stride = 1
slice_input = inputs[:3] + [end, stride]
slice_types = input_types + ["int32"]
return self.slice(slice_input, slice_types)
def split(self, inputs, input_types):
data = inputs[0]
split_size = int(inputs[1])
dim = int(inputs[2])
split_index = split_size
indices = []
while split_index < self.infer_shape(data)[dim]:
indices.append(split_index)
split_index += split_size
return _op.split(data, indices, dim)
def split_with_sizes(self, inputs, input_types):
data = inputs[0]
sections = inputs[1]
dim = int(inputs[2])
if len(sections) == 1:
# a special case used in torchvision detection models
return _expr.TupleWrapper(_expr.Tuple([data]), 1)
split_index = 0
indices = []
for i in range(len(sections) - 1):
index, _ = try_infer_value(sections[i], lambda ret: int(ret))
split_index += index
indices.append(split_index)
return _op.split(data, indices, dim)
def select(self, inputs, input_types):
data = inputs[0]
dim = int(inputs[1])
index = _wrap_const(inputs[2])
return _op.transform.take(data, index, axis=dim, mode="wrap")
def take(self, inputs, input_types):
data = inputs[0]
indices = _op.cast(inputs[1], "int32")
return _op.transform.take(data, indices=indices, mode="wrap")
def topk(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[2])
is_ascend = not bool(inputs[3])
sort = bool(inputs[4])
if isinstance(inputs[1], _expr.Expr):
k, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())
else:
k = inputs[1]
if not sort:
msg = "Currently supports only sorted output for topk operator."
raise AssertionError(msg)
outs = _op.topk(data, k=k, axis=axis, is_ascend=is_ascend, ret_type="both", dtype="int64")
return outs[0], outs[1]
def reciprocal(self, inputs, input_types):
data = inputs[0]
return _expr.const(1.0, dtype=input_types[0]) / data
def repeat(self, inputs, input_types):
data = inputs[0]
reps = []
for r in inputs[1]:
if isinstance(r, int):
reps.append(r)
else:
reps.append(int(_infer_value(r, {}).numpy()))
return _op.transform.tile(data, reps=reps)
def repeat_interleave(self, inputs, input_types):
data = inputs[0]
if isinstance(inputs[1], int):
repeats = inputs[1]
axis = inputs[2]
elif isinstance(inputs[1], _expr.Expr):
if isinstance(inputs[1], _expr.Constant):
repeats = int(inputs[1].data.numpy())
else:
repeats, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())
axis = inputs[2]
else:
msg = "Only repeat with one value as repeat is currently supported."
raise AssertionError(msg)
if axis is None: # Flatten the data if no axis is given from torch
data = _op.transform.reshape(data, [-1])
axis = 0
return _op.transform.repeat(data, repeats=repeats, axis=axis)
def addcdiv(self, inputs, input_types):
data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])
return data + (c * (t1 / t2))
def addcmul(self, inputs, input_types):
data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])
return data + (c * (t1 * t2))
def where(self, inputs, input_types):
if len(inputs) == 1:
return self.nonzero([inputs[0], True], input_types)
cond = inputs[0]
x, y = self.pytorch_promote_types(inputs[1:3], input_types[1:3])
return _op.where(cond, x, y)
def full_impl(self, data, fill_value, dtype):
size = []
need_reshape = False
new_shape = []
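# `size` collects dimensions that are (or can be inferred to be) static ints; if any dimension
# stays symbolic, `size` is set to None, a dynamic shape tensor is concatenated instead, and the
# result is reshaped to `new_shape` afterwards (need_reshape).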
for dim in data:
if isinstance(dim, _expr.Expr):
if isinstance(dim, _expr.Constant):
dim = int(dim.data.numpy())
if isinstance(size, list):
size.append(dim)
new_shape.append(dim)
else:
dim, success = try_infer_value(dim, lambda ret: int(ret), lambda: 0)
new_shape.append(dim)
if success:
if isinstance(size, list):
size.append(dim)
else:
size = None
need_reshape = True
else:
if isinstance(size, list):
size.append(dim)
new_shape.append(dim)
if size is None:
tmp = []
for dim in data:
tmp.append(_op.cast(_op.expand_dims(dim, axis=0), "int64"))
size = _op.concatenate(tmp, axis=0)
out = _op.full(_expr.const(fill_value, dtype=dtype), size, dtype=dtype)
if need_reshape:
out = _op.reshape(out, new_shape)
return out
def ones(self, inputs, input_types):
data = inputs[0]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = "Data type %s could not be parsed in ones op" % (type(data))
raise AssertionError(msg)
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
return self.full_impl(data, 1, dtype)
def ones_like(self, inputs, input_types):
data = inputs[0]
out = _op.ones_like(data)
# If the input and the output datatype is different, do a cast
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
if input_types[0] != dtype:
out = _op.cast(out, dtype)
return out
def zeros(self, inputs, input_types):
data = inputs[0]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = "Data type %s could not be parsed in zeros op" % (type(data))
raise AssertionError(msg)
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
return self.full_impl(data, 0, dtype)
def zeros_like(self, inputs, input_types):
data = inputs[0]
out = _op.zeros_like(data)
# If the input and the output datatype is different, do a cast
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
if input_types[0] not in dtype:
out = _op.cast(out, dtype)
return out
def full(self, inputs, input_types):
data = inputs[0]
fill_value = inputs[1]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = "Data type %s could not be parsed in full op" % (type(data))
raise AssertionError(msg)
if inputs[2] is not None: # dtype given
dtype = _convert_dtype_value(inputs[2])
else:
# if dtype is None, torch uses a global default set by torch.set_default_tensor_type()
dtype = self.default_dtype
return self.full_impl(data, fill_value, dtype)
def full_like(self, inputs, input_types):
data = inputs[0]
fill_value = inputs[1]
out = _op.full_like(data, _expr.const(fill_value))
# If the input and the output datatype is different, do a cast
if inputs[2] is not None: # dtype given
dtype = _convert_dtype_value(inputs[2])
else:
# if dtype is None, torch uses a global default set by torch.set_default_tensor_type()
dtype = self.default_dtype
if input_types[0] not in dtype:
out = _op.cast(out, dtype)
return out
def linspace(self, inputs, input_types):
start = inputs[0]
stop = inputs[1]
step = inputs[2]
# Find the spacing between values as step
if step != 1:
step = (stop - start) / (step - 1)
stop = stop + step
else:
stop = start + step
if inputs[3] is None:
import torch
dtype = _convert_data_type(str(torch.get_default_dtype()))
else:
dtype = _convert_dtype_value(inputs[3])
start = _create_typed_const(start, dtype)
stop = _create_typed_const(stop, dtype)
step = _create_typed_const(step, dtype)
return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)
def relu(self, inputs, input_types):
data = inputs[0]
if self.is_quantized_tensor(data):
assert len(inputs) == 3, "Input quant param not found in op inputs"
input_zero_point = _expr.const(inputs[2], dtype="int32")
return qnn_torch.quantized_relu(data, input_zero_point)
return _op.nn.relu(data)
def prelu(self, inputs, input_types):
# Reference: https://pytorch.org/docs/stable/generated/torch.nn.PReLU.html#torch.nn.PReLU
data = inputs[0]
dim = self.get_dims(data)
ndims = len(dim)
axis = 0 if ndims == 1 else 1
alpha = _op.broadcast_to(inputs[1], (dim[axis]))
return _op.nn.prelu(data, alpha, axis)
def leaky_relu(self, inputs, input_types):
data = inputs[0]
alpha = float(inputs[1])
return _op.nn.leaky_relu(data, alpha)
def elu(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
alpha = _expr.const(-float(inputs[1]), dtype=dtype)
return alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)
def celu(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
alpha = _expr.const(float(inputs[1]), dtype=dtype)
return alpha * _op.nn.relu(
_expr.const(1, dtype=dtype) - _op.exp(data / alpha)
) + _op.nn.relu(data)
def gelu(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
# gelu is data * normcdf(data)
# normcdf expressed as erf because we don't currently have that intrinsic
# note that there is also a fastgelu variant approximating normcdf
# with tanh and third order polynomials, but this is "true" gelu
return data * (
_expr.const(0.5, dtype=dtype)
+ _op.erf(data * _expr.const(0.5**0.5, dtype=dtype)) * _expr.const(0.5, dtype=dtype)
)
def selu(self, inputs, input_types):
data = inputs[0]
# https://pytorch.org/docs/stable/nn.html#selu
dtype = input_types[0]
alpha = _expr.const(-1.6732632423543772848170429916717, dtype=dtype)
gamma = _expr.const(1.0507009873554804934193349852946, dtype=dtype)
return gamma * (
alpha * _op.nn.relu(_expr.const(1.0, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)
)
def silu(self, inputs, input_types):
data = inputs[0]
return data * _op.tensor.sigmoid(data)
def log_sigmoid(self, inputs, input_types):
data = inputs[0]
return _op.log(_op.tensor.sigmoid(data))
def hard_sigmoid(self, inputs, input_types):
def _relu6(x):
return _op.tensor.clip(x, 0.0, 6.0)
def func(x):
return _relu6(x + _expr.const(3.0)) / _expr.const(6.0)
if self.is_quantized_tensor(inputs[0]):
input_scale = _expr.const(inputs[1])
input_zero_point = _expr.const(inputs[2])
# PyTorch seems to use the following output qparams, but accuracy
# is broken if we use this.
# TODO(masahi): Revisit this parameter choice
#
# Taken from src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
# output_scale = _expr.const(0.00390625) # 1.0 / 2^8
# output_zero_point = _expr.const(-128)
output_scale = input_scale
output_zero_point = input_zero_point
data = qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1)
out = func(data)
return qnn.op.quantize(out, output_scale, output_zero_point, out_dtype="uint8")
return func(inputs[0])
def hard_swish(self, inputs, input_types):
data = inputs[0]
return data * self.hard_sigmoid(inputs, input_types)
def adaptive_avg_pool(self, op, inputs, input_types):
data = inputs[0]
output_size = inputs[1]
def func(x):
return op(x, output_size=output_size)
if self.is_quantized_tensor(data):
return qnn_torch.apply_with_upcast(data, func)
return func(data)
def adaptive_max_pool(self, op, inputs, input_types):
data = inputs[0]
output_size = inputs[1]
# returns dummy indices too
return op(data, output_size=output_size), None
@staticmethod
def convert_const_list(data):
if isinstance(data, list):
for i, _ in enumerate(data):
if isinstance(data[i], _expr.Expr):
data[i] = int(_infer_value_simulated(data[i], {}).numpy())
return data
def maxpool_2d(self, inputs, input_types):
data = inputs[0]
pool_size = self.convert_const_list(inputs[1])
strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
return _op.nn.max_pool2d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout="NCHW",
ceil_mode=ceil_mode,
)
def maxpool_2d_with_indices(self, inputs, input_types):
# returns dummy indices too
return self.maxpool_2d(inputs, input_types), None
def maxpool_1d(self, inputs, input_types):
data = inputs[0]
pool_size = inputs[1]
strides = inputs[2] if inputs[2] else pool_size
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
return _op.nn.max_pool1d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout="NCW",
ceil_mode=ceil_mode,
)
def maxpool_3d(self, inputs, input_types):
data = inputs[0]
pool_size = inputs[1]
strides = inputs[2] if inputs[2] else pool_size
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
return _op.nn.max_pool3d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
ceil_mode=ceil_mode,
)
def hardtanh(self, inputs, input_types):
a = inputs[0]
tanh_min = float(inputs[1])
tanh_max = float(inputs[2])
return _op.tensor.clip(a, tanh_min, tanh_max)
def convolution(self, inputs, input_types):
# inputs[6] selects between a transposed and a normal convolution
use_transpose = inputs[6] == 1
data = inputs[0]
weight = inputs[1]
bias = inputs[2]
strides = tuple(inputs[3])
padding = tuple(inputs[4])
dilation = tuple(inputs[5])
if isinstance(weight, _expr.Expr):
inferred_shape = self.infer_shape(weight)
weight_shape = []
for infer in inferred_shape:
weight_shape.append(infer)
else:
msg = "Data type %s could not be parsed in conv op" % (type(weight))
raise AssertionError(msg)
groups = int(inputs[8])
if use_transpose:
channels = weight_shape[1] * groups
in_channels = weight_shape[0]
else:
channels = weight_shape[0]
in_channels = weight_shape[1]
# Check if this is a depthwise convolution
# We need to reshape the weight so that Relay can recognize it as depthwise
# weight_shape[1] is always in_channels // groups
# For depthwise, in_channels == groups, so weight_shape[1] == 1
# If groups > 1 but weight_shape[1] != 1, this is group convolution
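# e.g. (illustrative) a depthwise weight of shape (48, 1, 3, 3) with groups=24 is reshaped to
# (24, 2, 3, 3), i.e. (groups, channel_multiplier, kH, kW).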
if groups > 1 and in_channels == 1:
channel_multiplier = channels // groups
new_weight_shape = (groups, channel_multiplier) + tuple(weight_shape[2:])
weight = _op.transform.reshape(weight, new_weight_shape)
kernel_size = weight_shape[2:]
use_bias = isinstance(bias, _expr.Expr)
# We are trying to invoke various relay operations through a single conv_op variable.
# However the function signatures for some operations have additional attributes so we
# pass these in along with the standard ones.
additional_arguments = dict()
if use_transpose:
if len(kernel_size) == 3:
conv_op = _op.nn.conv3d_transpose
elif len(kernel_size) == 2:
conv_op = _op.nn.conv2d_transpose
else:
conv_op = _op.nn.conv1d_transpose
output_padding = tuple(inputs[7])
additional_arguments["output_padding"] = output_padding
else:
if len(kernel_size) == 3:
conv_op = _op.nn.conv3d
elif len(kernel_size) == 2:
conv_op = _op.nn.conv2d
else:
conv_op = _op.nn.conv1d
if len(kernel_size) == 3:
data_layout = "NCDHW"
kernel_layout = "OIDHW"
elif len(kernel_size) == 2:
data_layout = "NCHW"
kernel_layout = "OIHW"
if use_transpose:
# Transposed convolutions have IOHW layout.
kernel_layout = "IOHW"
else:
data_layout = "NCW"
kernel_layout = "OIW"
# Conv1d does not currently support grouped convolution so we convert it to conv2d
is_grouped_conv1d = False
if groups > 1 and len(kernel_size) == 1 and not use_transpose:
is_grouped_conv1d = True
conv_op = _op.nn.conv2d
kernel_size = [1] + kernel_size
strides = (1,) + strides
padding = (0,) + padding
dilation = (1,) + dilation
data = _op.expand_dims(data, axis=2)
weight = _op.expand_dims(weight, axis=2)
data_layout = "NCHW"
kernel_layout = "OIHW"
conv_out = conv_op(
data,
weight,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
channels=channels,
kernel_size=kernel_size,
data_layout=data_layout,
kernel_layout=kernel_layout,
out_layout="",
out_dtype="",
**additional_arguments,
)
if use_bias:
res = _op.nn.bias_add(conv_out, bias)
else:
res = conv_out
if is_grouped_conv1d:
# Because we conducted grouped conv1d convolution through conv2d we must
# squeeze the output to get the correct result.
res = _op.squeeze(res, axis=[2])
return res
def softmax(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
if isinstance(axis, str):
axis = int(axis)
return _op.nn.softmax(data, axis=axis)
def threshold(self, inputs, input_types):
data = inputs[0]
return _op.nn.relu(data)
def contiguous(self, inputs, input_types):
return inputs[0]
def batch_norm(self, inputs, input_types):
data = inputs[0]
data_type = input_types[0]
channels = self.infer_shape(data)
if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):
scale = center = True
weight = inputs[1]
beta = inputs[2]
gamma = weight
else:
scale = center = False
if not scale:
gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)
if not center:
beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)
moving_mean = inputs[3]
moving_var = inputs[4]
epsilon = float(inputs[7])
return _op.nn.batch_norm(
data,
gamma,
beta,
moving_mean,
moving_var,
axis=1,
epsilon=epsilon,
center=center,
scale=scale,
)[0]
def instance_norm(self, inputs, input_types):
data = inputs[0]
data_type = input_types[0]
channels = self.infer_shape(data)
if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):
scale = center = True
weight = inputs[1]
beta = inputs[2]
gamma = weight
else:
scale = center = False
if not scale:
gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)
if not center:
beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)
epsilon = float(inputs[7])
return _op.nn.instance_norm(
data, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale
)
def get_dims(self, data):
import torch
if isinstance(data, _expr.Expr):
dims = self.infer_shape(data)
elif isinstance(data, list):
dims = data
elif isinstance(data, (torch.Tensor, np.ndarray)):
dims = data.shape
else:
msg = "Data type %s could not be parsed" % type(data)
raise AssertionError(msg)
return dims
def layer_norm(self, inputs, input_types):
data = inputs[0]
ndims = len(self.get_dims(inputs[1]))
assert ndims == 1, "Support only normalization over last one dimension."
return _op.nn.layer_norm(
data,
gamma=inputs[2],
beta=inputs[3],
axis=-1,
epsilon=float(inputs[4]),
center=True,
scale=True,
)
def group_norm(self, inputs, input_types):
data = inputs[0]
gamma = inputs[2]
beta = inputs[3]
num_groups = inputs[1]
epsilon = float(inputs[4])
return _op.nn.group_norm(
data,
gamma=gamma,
beta=beta,
num_groups=num_groups,
axis=1,
epsilon=epsilon,
center=True,
scale=True,
)
def transpose(self, inputs, input_types):
data = inputs[0]
import torch
if isinstance(data, _expr.Expr):
ndims = len(self.infer_shape_with_prelude(data))
elif isinstance(data, list):
ndims = data
elif isinstance(data, (torch.Tensor, np.ndarray)):
ndims = data.shape
else:
msg = "Data type %s could not be parsed in transpose op" % (type(data))
raise AssertionError(msg)
if isinstance(data, tvm.runtime.NDArray):
ndims = len(data.shape)
axes = list(range(ndims))
num_inputs = len(inputs)
if num_inputs == 1:
if ndims >= 2:
axes[-1] = ndims - 2
axes[-2] = ndims - 1
if not isinstance(data, _expr.Expr):
data = _expr.const(data)
elif num_inputs == 3:
parse = lambda i: ndims * (i < 0) + i
src, dst = [parse(int(inputs[i])) for i in [1, 2]]
axes[src] = dst
axes[dst] = src
else:
axes = inputs[1]
return _op.transform.transpose(data, axes)
def flatten(self, inputs, input_types):
data = inputs[0]
start = int(inputs[1])
end = int(inputs[2])
dshape = get_const_tuple(self.infer_shape_with_prelude(data))
ndim = len(dshape)
if start < 0:
start += ndim
if end < 0:
end += ndim
assert start <= end, "start dim cannot come after end dim"
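# Relay reshape semantics used below: 0 copies the corresponding input dimension and -1 infers
# one dimension, so the flattened range [start, end] collapses into the single -1 entry; the
# placeholder 1s appended for the remaining flattened dims are squeezed away at the end.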
new_shape = [0] * start
new_shape.append(-1)
squeeze_axes = []
for i in range(start + 1, end + 1):
new_shape.append(1)
squeeze_axes.append(i)
for _ in range(end + 1, ndim):
new_shape.append(0)
out = _op.reshape(data, new_shape)
if squeeze_axes:
out = _op.squeeze(out, axis=squeeze_axes)
return out
def addmm(self, inputs, input_types):
input_mat = inputs[0]
mat1 = inputs[1]
data_type = input_types[1]
mat2 = inputs[2]
beta = inputs[3]
alpha = inputs[4]
if not isinstance(alpha, _expr.Expr) and alpha != 1:
alpha = _create_typed_const(alpha, data_type)
mat1 *= alpha
if not isinstance(beta, _expr.Expr) and beta != 1:
beta = _create_typed_const(beta, data_type)
mat2 *= beta
transposed_mat2 = _op.transform.transpose(mat2, axes=[1, 0])
units = self.infer_shape(transposed_mat2)[0]
dense_out = _op.nn.dense(mat1, transposed_mat2, units=units)
return dense_out + input_mat
def size(self, inputs, input_types):
shape = self.infer_shape_with_prelude(inputs[0])
axis = None
if len(inputs) > 1:
axis = int(inputs[1])
if any(map(lambda s: isinstance(s, tvm.tir.expr.Any), shape)):
if axis is None or isinstance(shape[axis], tvm.tir.expr.Any):
shape_dynamic = _op.shape_of(inputs[0], dtype="int32")
if axis is not None:
return _op.take(shape_dynamic, _expr.const(axis), 0)
return shape_dynamic
if axis is not None:
return _expr.const(shape[axis])
return _expr.const(shape)
def numtotensor(self, inputs, input_types):
val = inputs[0]
dtype = input_types[0]
if isinstance(val, _expr.Expr):
return val
if isinstance(val, tvm.tir.IntImm):
val = val.__int__()
dtype = int
arr = val * np.ones([]).astype(dtype)
return arr
def tensortonum(self, inputs, input_types):
return inputs[0]
def view(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 3:
shape_inp = [inputs[1], self.infer_shape(inputs[2])[0]]
else:
if isinstance(inputs[1], list):
shape_inp = inputs[1]
else:
shape_inp = self.infer_shape(inputs[1])
new_shape = shape_inp
for i, shape in enumerate(shape_inp):
if isinstance(shape, _expr.Expr):
val = _infer_value_simulated(shape, {})
new_shape[i] = val.numpy().item(0)
return _op.transform.reshape(data, new_shape)
def reshape(self, inputs, input_types):
data = inputs[0]
new_shape = inputs[1]
tmp_shape = []
is_dyn = False
for s in new_shape:
if isinstance(s, _expr.Constant):
tmp_shape.append(int(s.data.numpy()))
elif isinstance(s, _expr.Expr):
dim, success = try_infer_value(s, lambda ret: int(ret))
tmp_shape.append(dim)
if not success:
is_dyn = True
else:
tmp_shape.append(s)
if is_dyn:
new_shape = []
for i, s in enumerate(tmp_shape):
if not isinstance(s, _expr.Expr):
s = _expr.const(s, "int64")
else:
s = _op.cast(s, "int64")
new_shape.append(_op.expand_dims(s, axis=0))
new_shape = _op.concatenate(new_shape, axis=0)
else:
new_shape = tmp_shape
return _op.transform.reshape(data, new_shape)
def pixel_shuffle(self, inputs, input_types):
data = inputs[0]
upscale_factor = inputs[1]
upscale_squared = upscale_factor * upscale_factor
b, c, h, w = self.infer_shape(data)
assert (
c % upscale_squared == 0
), "input channel should be divisible by square of upscale_factor"
ndims = len(self.infer_shape_with_prelude(data))
axes = list(range(ndims))
num_inputs = len(inputs)
oc = c // upscale_squared
oh = h * upscale_factor
ow = w * upscale_factor
new_shape = [b, oc, upscale_factor, upscale_factor, h, w]
out_shape = [b, oc, oh, ow]
data = _op.transform.reshape(data, new_shape)
# The data will be transposed to
# [b, oc, h, upscale_factor, w, upscale_factor]
# for further reshape
axes = [0, 1, 4, 2, 5, 3]
data = _op.transform.transpose(data, axes)
return _op.transform.reshape(data, out_shape)
def clone(self, inputs, input_types):
data = inputs[0]
return _op.tensor.copy(data)
def log_softmax(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
return _op.nn.log_softmax(data, axis)
def sigmoid(self, inputs, input_types):
data = inputs[0]
def func(x):
return _op.tensor.sigmoid(x)
if self.is_quantized_tensor(data):
assert len(inputs) == 3, "Input quant param not found in op inputs"
input_scale = _expr.const(inputs[1])
input_zero_point = _expr.const(inputs[2])
return qnn_torch.apply_with_fp32_fallback(data, input_scale, input_zero_point, func)
return func(data)
def softplus(self, inputs, input_types):
dtype = input_types[0]
beta = _expr.const(float(inputs[1]), dtype=dtype)
return _op.log(_op.exp(inputs[0] * beta) + _expr.const(1.0, dtype=dtype)) / beta
def make_avg_pool(self, dim):
def avg_pool(inputs, input_types):
data = inputs[0]
pool_size = self.convert_const_list(inputs[1])
strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)
padding = inputs[3]
ceil_mode = int(inputs[4])
count_include_pad = int(inputs[5])
def func(x):
if dim == 1:
return _op.nn.avg_pool1d(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
dilation=(1,),
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
elif dim == 2:
return _op.nn.avg_pool2d(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
dilation=(1, 1),
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
elif dim == 3:
return _op.nn.avg_pool3d(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
dilation=(1, 1, 1),
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
else:
msg = "Average Pooling dimension should be between 1 and 3"
raise RuntimeError(msg)
if self.is_quantized_tensor(data):
return qnn_torch.apply_with_upcast(data, func)
return func(data)
return avg_pool
def linear(self, inputs, input_types):
# https://pytorch.org/docs/stable/nn.functional.html#linear
# 0 - input
# 1 - weight
bias = inputs[2]
a_shape = self.infer_shape_with_prelude(inputs[0])
b_shape = self.infer_shape_with_prelude(inputs[1])
if len(a_shape) == 2 and len(b_shape) == 2:
mm_out = _op.nn.dense(inputs[0], inputs[1])
elif len(b_shape) == 1:
mm_out = self.matmul([inputs[0], inputs[1]], input_types[:2])
else:
mm_out = self.matmul(
[inputs[0], _op.transpose(inputs[1], axes=(1, 0))], input_types[:2]
)
if isinstance(bias, _expr.Expr):
bias_ndims = len(self.infer_shape_with_prelude(bias))
if bias_ndims == 1:
return _op.nn.bias_add(mm_out, bias, axis=-1)
mm_dtype = self.infer_type_with_prelude(mm_out).dtype
return self.add([mm_out, bias], [mm_dtype, input_types[2]])
return mm_out
def dropout(self, inputs, input_types):
data = inputs[0]
rate = float(inputs[1])
return _op.nn.dropout(data, rate)
def make_reduce(self, name):
def reduce(inputs, input_types):
data = inputs[0]
axis = None
keepdims = False
if len(inputs) > 2: # default, torch have only data, axis=None, keepdims=False
if isinstance(inputs[1], int):
axis = int(inputs[1])
elif _is_int_seq(inputs[1]):
axis = inputs[1]
else:
axis = list(self.infer_shape(inputs[1]))
keepdims = bool(inputs[2])
return get_relay_op(name)(data, axis=axis, keepdims=keepdims)
return reduce
def norm(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
axis = None
keepdims = False
if len(inputs) > 3:
axis = inputs[2]
keepdims = bool(inputs[3])
order = inputs[1]
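# p-norm: ||x||_p = (sum_i |x_i|**p) ** (1/p); p=inf reduces to max |x_i| and p=-inf to min |x_i|.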
if order == np.inf:
return _op.reduce.max(_op.abs(data), axis=axis, keepdims=keepdims)
elif order == -np.inf:
return _op.reduce.min(_op.abs(data), axis=axis, keepdims=keepdims)
else:
reci_order = _expr.const(1.0 / order, dtype=dtype)
order = _expr.const(order)
return _op.power(
_op.reduce.sum(_op.power(_op.abs(data), order), axis=axis, keepdims=keepdims),
reci_order,
)
def frobenius_norm(self, inputs, input_types):
data = inputs[0]
axis = None
keepdims = False
if len(inputs) > 2:
axis = inputs[1] if len(inputs[1]) > 0 else None
keepdims = bool(inputs[2])
return _op.sqrt(_op.reduce.sum((data * data), axis=axis, keepdims=keepdims))
def std(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
return _op.reduce.std(data, axis=axis, keepdims=keepdims, unbiased=unbiased)
def variance(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
return _op.reduce.variance(data, axis=axis, keepdims=keepdims, unbiased=unbiased)
def mean(self, inputs, input_types):
data = inputs[0]
if inputs[1]:
axis = inputs[1]
else:
axis = None
if len(inputs) > 2 and inputs[2]:
keepdims = int(inputs[2])
else:
keepdims = False
if len(inputs) > 3 and inputs[3]:
exclude = int(inputs[3])
else:
exclude = False
def func(x):
return _op.mean(x, axis, keepdims, exclude)
if self.is_quantized_tensor(data):
assert len(inputs) == 6, "Input quant param not found in op inputs"
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
# refer to aten/src/ATen/native/quantized/cpu/qreduction.cpp
return qnn_torch.apply_with_fp32_fallback(data, input_scale, input_zero_point, func)
return func(data)
def var_mean(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
m, v = _op.reduce.mean_variance(data, axis, keepdims, False, unbiased)
return v, m
def chunk(self, inputs, input_types):
data = inputs[0]
num_chunks = int(inputs[1])
axis = int(inputs[2])
if isinstance(data, _expr.Expr):
inferred_shape = self.infer_shape_with_prelude(data)
shape = []
for infer in inferred_shape:
shape.append(infer)
dim = int(shape[axis])
if dim % num_chunks:
unif_size = int(dim / (num_chunks - 1))
else:
unif_size = int(dim / num_chunks)
indeces = []
for i in range(unif_size, dim, unif_size):
indeces.append(i)
return _op.split(data, indeces, axis)
def matmul(self, inputs, input_types):
inputs_0 = inputs[0]
inputs_1 = inputs[1]
# Need to check input shape as batch matmul must be supported.
a_shape = self.infer_shape_with_prelude(inputs_0)
b_shape = self.infer_shape_with_prelude(inputs_1)
# When performing a batch matmul, we need to properly handle N-dim shapes.
if len(a_shape) > 2 and len(b_shape) > 2:
# Convert a into a 3 dimensional tensors.
need_reshape_output = False
if len(a_shape) != 3:
a = _op.reshape(inputs_0, [-1, a_shape[-2], a_shape[-1]])
need_reshape_output = True
else:
a = inputs_0
# Transpose matrix dimensions of b.
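# Relay's nn.batch_matmul multiplies against the transpose of its second operand (A * B^T),
# so b is transposed here to end up with a plain A * B.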
trans_axes = list(range(len(b_shape)))
trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2]
b = _op.transpose(inputs_1, trans_axes)
# Convert b into a 3 dimensional tensor. Note that the last two dimensions
# are transposed.
if len(b_shape) != 3:
b = _op.reshape(b, [-1, b_shape[-1], b_shape[-2]])
# Perform a batch matmul.
output = _op.nn.batch_matmul(a, b)
# Reshape output to original dimensions.
if need_reshape_output:
return _op.reshape(output, [*a_shape[:-2], a_shape[-2], b_shape[-1]])
return output
elif len(a_shape) > 2:
inputs_0 = _op.reshape(inputs_0, [-1, a_shape[-1]])
if len(b_shape) > 2:
trans_axes = list(range(len(b_shape)))
trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2]
input_1 = _op.reshape(_op.transpose(inputs_1, trans_axes), [-1, b_shape[-2]])
elif len(b_shape) == 2:
input_1 = _op.transpose(inputs_1, axes=(1, 0))
elif len(b_shape) == 1:
input_1 = _op.expand_dims(inputs_1, 0, 1)
out = _op.nn.dense(inputs_0, input_1)
if len(b_shape) == 1:
out = _op.squeeze(out, axis=[-1])
# Reshape output into a N dimensional tensor when a or b dim > 2
if len(a_shape) > 2:
out = _op.reshape(out, [*a_shape[:-1], b_shape[-1]])
elif len(b_shape) > 2:
out = _op.reshape(out, [a_shape[-2], -1, b_shape[-1]])
out = _op.reshape(
_op.transpose(out, [1, 0, 2]), [*b_shape[:-2], a_shape[-2], b_shape[-1]]
)
return out
def expand(self, inputs, input_types):
data_in = inputs[0]
shape = list(self.infer_shape(data_in))
ndims = len(shape)
sizes = inputs[1]
out = data_in
out_dims = len(sizes)
if ndims < out_dims:
num_newaxis = out_dims - ndims
out = _op.expand_dims(out, axis=0, num_newaxis=num_newaxis)
shape = [1] * num_newaxis + shape
for i in range(out_dims):
if sizes[i] != -1 and shape[i] == 1:
if not isinstance(sizes[i], int):
sizes[i] = int(_infer_value(sizes[i], {}).numpy())
out = _op.repeat(out, sizes[i], axis=i)
return out
def int(self, inputs, input_types):
if isinstance(inputs[0], _expr.Expr):
return inputs[0]
return int(inputs[0])
def identity(self, inputs, input_types):
return inputs[0]
def none(self, inputs, input_types):
return None
def make_pad(self, mode):
def pad(inputs, input_types):
data = inputs[0]
if isinstance(inputs[1], list):
pad_list = inputs[1]
else:
pad_list = list(self.infer_shape(inputs[1]))
# initialize paddings based on input len
pad_len = len(self.infer_shape(data)) * 2
paddings = [0] * pad_len
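# torch.nn.functional.pad lists pad widths starting from the last dimension:
# (last_left, last_right, 2nd_last_left, 2nd_last_right, ...), which is why pad_list is written
# into `paddings` back to front below.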
if len(pad_list) >= 2:
paddings[-1] = pad_list[1]
paddings[-2] = pad_list[0]
if len(pad_list) >= 4:
paddings[-3] = pad_list[3]
paddings[-4] = pad_list[2]
if len(pad_list) >= 6:
paddings[-5] = pad_list[5]
paddings[-6] = pad_list[4]
# group into tuple of 2 ints
paddings = [paddings[i : i + 2] for i in range(0, len(paddings), 2)]
const_paddings = []
non_zero_found = False
for pad in paddings:
const_paddings.append([])
for p in pad:
if not isinstance(p, int):
p = int(_infer_value(p, {}).numpy())
const_paddings[-1].append(p)
if p != 0:
non_zero_found = True
if not non_zero_found:
return data
elif mode == "constant":
return _op.nn.pad(data, const_paddings, pad_value=inputs[2], pad_mode=mode)
else:
return _op.nn.pad(data, const_paddings, pad_mode=mode)
return pad
def clamp_common(self, data, min=None, max=None):
def get_v(v, default_v):
if isinstance(v, _expr.Constant):
return float(v.data.numpy())
if isinstance(v, _expr.Expr):
infer_v, success = try_infer_value(v, lambda ret: float(ret))
if success:
return infer_v
if v is not None:
return v
return default_v
dtype = self.infer_type(data).dtype
type_info = np.finfo(dtype) if "float" in dtype else np.iinfo(dtype)
# TODO(masahi): Properly handle inf in a one-way clamp case.
if min is not None and max is not None:
amin = get_v(min, type_info.min)
amax = get_v(max, type_info.max)
elif min is not None:
amin = get_v(min, type_info.min)
amax = type_info.max
else:
amin = type_info.min
amax = get_v(max, type_info.max)
return _op.clip(data, amin, amax)
def clamp(self, inputs, _):
return self.clamp_common(inputs[0], min=inputs[1], max=inputs[2])
def clamp_min(self, inputs, input_types):
return self.clamp_common(inputs[0], min=inputs[1])
def clamp_max(self, inputs, input_types):
return self.clamp_common(inputs[0], max=inputs[1])
def to(self, inputs, input_types):
data = inputs[0]
dtype = inputs[1] if inputs[1] is not None and not isinstance(inputs[1], str) else inputs[2]
# special handling for aten::to(data, 6, _, _, _) case
# 6 means dtype = float
# this happens when converting upsampling with scale factor
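# The integer keys below are torch's ScalarType enum codes (3=int32, 4=int64, 5=float16,
# 6=float32, 7=float64).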
cast_map = {
5: "float16",
6: "float32",
7: "float64",
3: "int32",
4: "int64",
}
cast_func = {5: float, 6: float, 7: float, 3: int, 4: int}
ret = data
if isinstance(data, _expr.Expr):
actual_dtype = str(self.infer_type(data).dtype)
if dtype in cast_map and cast_map[dtype] != actual_dtype:
ret = _op.cast(data, cast_map[dtype])
elif dtype in cast_map:
ret = cast_func[dtype](data)
return ret
def get_upsample_out_size(self, inputs, method):
# This assumes a static shape
out_size = []
if inputs[1] is not None:
for size in inputs[1]:
if not isinstance(size, int):
out_size.append(int(_infer_value(size, {}).numpy()))
else:
out_size.append(size)
else:
scale_index = 3 if method != "nearest_neighbor" else 2
scales = inputs[scale_index]
assert scales is not None, "neither out size nor scale provided"
assert isinstance(scales, list)
ishape = self.infer_shape(inputs[0])
for i, scale in enumerate(scales):
out_size.append(int(math.floor(float(ishape[2 + i]) * scale)))
return out_size
def make_upsample(self, method):
def upsample(inputs, input_types):
data = inputs[0]
out_size = self.get_upsample_out_size(inputs, method)
if len(inputs) > 2 and method != "nearest_neighbor":
align_corners = inputs[2]
else:
align_corners = False
if method == "nearest_neighbor":
coord_trans = "asymmetric"
elif align_corners:
coord_trans = "align_corners"
else:
coord_trans = "half_pixel"
def func(x):
return _op.image.resize2d(
x, out_size, None, "NCHW", method, coord_trans, cubic_alpha=-0.75
)
if self.is_quantized_tensor(data):
# input qparams are manually appended by us
assert isinstance(inputs[-2], float)
assert isinstance(inputs[-1], int)
input_scale = _expr.const(inputs[-2])
input_zero_point = _expr.const(inputs[-1])
# currently piggybacks on the fp32 op; it gets output identical to torch
return qnn_torch.apply_with_fp32_fallback(data, input_scale, input_zero_point, func)
return func(data)
return upsample
def make_upsample3d(self, method):
def upsample3d(inputs, input_types):
data = inputs[0]
out_size = self.get_upsample_out_size(inputs, method)
if len(inputs) > 2 and method == "linear":
align_corners = inputs[2]
else:
align_corners = False
if method == "nearest_neighbor":
coord_trans = "asymmetric"
elif align_corners:
coord_trans = "align_corners"
else:
coord_trans = "half_pixel"
return _op.image.resize3d(data, out_size, None, "NCDHW", method, coord_trans)
return upsample3d
def expand_as(self, inputs, input_types):
target = inputs[1]
t0 = self.infer_type(inputs[0]).dtype
t1 = self.infer_type(inputs[1]).dtype
if str(t0) != str(t1):
target = _op.cast(target, t0)
return _op.broadcast_to_like(inputs[0], target)
def Bool(self, inputs, input_types):
assert len(inputs) == 1
return inputs[0]
def Float(self, inputs, input_types):
assert len(inputs) == 1
return _op.cast(inputs[0], "float32")
def bitwise_not(self, inputs, input_types):
data = inputs[0]
# The input tensor must be of integral or Boolean types.
# For bool tensors, it computes the logical NOT
if input_types[0] == "bool":
out = _op.logical_not(_op.cast(data, "bool"))
else:
out = _op.bitwise_not(_op.cast(data, "int"))
return out
def bitwise_xor(self, inputs, input_types):
lhs = inputs[0]
rhs = inputs[1]
lhs = _op.cast(lhs, "bool") if input_types[0] == "bool" else _op.cast(lhs, "int")
rhs = _op.cast(rhs, "bool") if input_types[1] == "bool" else _op.cast(rhs, "int")
return _op.bitwise_xor(lhs, rhs)
def logical_not(self, inputs, input_types):
data = _wrap_const(inputs[0])
return _op.logical_not(_op.cast(data, "bool"))
def logical_xor(self, inputs, input_types):
lhs = _op.cast(inputs[0], "bool")
rhs = _op.cast(inputs[1], "bool")
return _op.logical_xor(lhs, rhs)
def list_getitem(self, inputs, input_types):
return self.prelude.nth(inputs[0], _wrap_const(inputs[1]))
def list_len(self, inputs, input_types):
return self.prelude.length(inputs[0])
def type_as(self, inputs, input_types):
assert len(inputs) == 2
assert len(input_types) == 2
return _op.cast(inputs[0], input_types[1])
def gather(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
indices = inputs[2]
return _op.gather(data, axis, indices)
def add(self, inputs, input_types):
# add_ is overloaded for tensor add and list concat
if input_types[0] == "ListType":
return self.prelude.concat(inputs[0], inputs[1])
return self.make_elemwise("add")(inputs, input_types)
def tensor_array_stack(self, inputs, input_types):
dim = inputs[1]
assert dim == 0, "stacking on a dynamic tensor list is only supported on the first axis"
tensor_array, shape = self.convert_to_tensor_array(inputs[0])
stacked_shape = (Any(),) + shape
stack = self.prelude.get_global_var_static("tensor_array_stack", "float32", shape)
stacked = stack(tensor_array)
static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", stacked_shape)
static_tensor_array_ops.register()
get_tensor = self.prelude.get_global_var_static("tensor_get_data", "float32", stacked_shape)
return get_tensor(stacked)
def stack(self, inputs, input_types):
if isinstance(inputs[0], list):
# a static python list of tensors
dim = inputs[1]
return _op.stack(inputs[0], dim)
else:
# List ADT case
assert isinstance(inputs[0], _expr.Expr)
ty = self.infer_type_with_prelude(inputs[0])
list_ty = self.prelude.mod.get_global_type_var("List")
msg = "The input list is expected to be List ADT"
assert isinstance(ty, tvm.ir.TypeCall) and ty.func == list_ty, msg
return self.tensor_array_stack(inputs, input_types)
def sub(self, inputs, input_types):
if len(inputs) == 3:
data0, data1, alpha = self.pytorch_promote_types(inputs, input_types)
return get_relay_op("subtract")(data0, alpha * data1)
else:
data0, data1 = self.pytorch_promote_types(inputs, input_types)
return get_relay_op("subtract")(data0, data1)
def rsub(self, inputs, input_types):
data0, data1, alpha = self.pytorch_promote_types(inputs, input_types)
# note: rsub means data0 and data1 swap places
return get_relay_op("subtract")(data1, alpha * data0)
def embedding(self, inputs, input_types):
weight = inputs[0]
indices = inputs[1]
return _op.take(weight, indices.astype("int32"), axis=0)
def one_hot(self, inputs, input_types):
indices = inputs[0].astype("int32")
num_classes = inputs[1]
if num_classes == -1:
msg = "Inferring the number of classes is not yet supported."
raise NotImplementedError(msg)
dtype = "int32"
on_value = tvm.relay.const(1.0, dtype)
off_value = tvm.relay.const(0.0, dtype)
return _op.one_hot(indices, on_value, off_value, num_classes, -1, dtype)
def index(self, inputs, input_types):
data = inputs[0]
indices = inputs[1]
return _op.adv_index([data] + indices)
def meshgrid(self, inputs, input_types):
data = inputs[0]
return _op.meshgrid(data, indexing="ij")
def nms(self, inputs, input_types):
boxes = inputs[0]
scores = inputs[1]
iou_threshold = inputs[2]
# TVM NMS assumes score > 0
scores = scores - _op.min(scores) + _op.const(1.0)
num_boxes = _op.shape_of(scores)
# PyTorch NMS doesn't have score_threshold, so no need to run get_valid_count
indices = _op.transform.arange(_op.squeeze(num_boxes), dtype="int32")
indices = _op.expand_dims(indices, 0, 1)
# Generate data with shape (1, num_anchors, 5)
scores = AttrCvt(op_name="expand_dims", extras={"axis": -1, "num_newaxis": 1})([scores], {})
data = _op.concatenate([scores, boxes], -1)
data = _op.expand_dims(data, 0, 1)
# Perform Non-Maximum Suppression,
# PyTorch NMS doesn't have parameter top_k and max_output_size
score_index = 0
top_k = max_out_size = -1
nms_ret = get_relay_op("non_max_suppression")(
data=data,
valid_count=num_boxes,
indices=indices,
max_output_size=max_out_size,
iou_threshold=iou_threshold,
force_suppress=True,
top_k=top_k,
coord_start=1,
score_index=score_index,
id_index=-1,
return_indices=True,
invalid_to_bottom=False,
)
# squeeze the two outputs of nms for strided_slice
size = get_relay_op("squeeze")(nms_ret[1], axis=[1])
data_slice = get_relay_op("squeeze")(nms_ret[0], axis=[0])
# strided slice to get the dynamic result
ret = get_relay_op("strided_slice")(
data_slice, begin=_expr.const([0]), end=size, slice_mode="size"
)
# in torchvision, indices from nms are int64
return _op.cast(ret, "int64")
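# Illustrative note (not part of the converter logic): this op assumes the
# torchvision call signature `torchvision.ops.nms(boxes, scores, iou_threshold)`,
# with `boxes` of shape (N, 4) in (x1, y1, x2, y2) order and `scores` of shape (N,):
#
#   import torch, torchvision
#   boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]])
#   scores = torch.tensor([0.9, 0.8])
#   keep = torchvision.ops.nms(boxes, scores, iou_threshold=0.5)  # int64 indices
#
# On the Relay side each box becomes [score, x1, y1, x2, y2] (hence score_index=0,
# coord_start=1), the returned indices are sliced down to the dynamic number of
# kept boxes, and the result is cast to int64 to match torchvision.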
def logsumexp(self, inputs, input_types):
data = self.pytorch_promote_types(inputs[:1], input_types[:1])
dim_list = inputs[1]
keepdim = inputs[2] if len(inputs) > 2 else False
# dim is output of prim::ListConstruct, even if it is int in python code
assert isinstance(dim_list, list), "dim is expected to be a list"
return _op.logsumexp(data[0], axis=dim_list, keepdims=keepdim)
def roi_align(self, inputs, input_types):
data = inputs[0]
boxes = inputs[1]
output_size = (inputs[3], inputs[4])
spatial_scale = inputs[2]
sample_ratio = inputs[5]
aligned = False if len(inputs) < 7 else inputs[6]
if aligned:
boxes -= _expr.const(0.5 / spatial_scale)
return _op.vision.roi_align(data, boxes, output_size, spatial_scale, sample_ratio)
def deform_conv2d(self, inputs, input_types):
data = inputs[0]
weight = inputs[1]
offset = inputs[2]
if len(inputs) > 12:
strides_offset = 5
bias = inputs[4]
logging.warning("mask argument in deformable conv2d is not supported and ignored")
else:
strides_offset = 4
bias = inputs[3]
strides = (inputs[strides_offset], inputs[strides_offset + 1])
padding = (inputs[strides_offset + 2], inputs[strides_offset + 3])
dilation = (inputs[strides_offset + 4], inputs[strides_offset + 5])
groups = inputs[strides_offset + 6]
deformable_groups = inputs[strides_offset + 7]
weight_shape = self.infer_shape(weight)
output_channels = weight_shape[0]
kernel_size = (weight_shape[2], weight_shape[3])
conv_out = _op.nn.deformable_conv2d(
data,
offset,
weight,
strides,
padding,
dilation,
deformable_groups,
groups,
output_channels,
kernel_size,
)
return _op.nn.bias_add(conv_out, bias)
def stft(self, inputs, input_types):
data = inputs[0]
n_fft = inputs[1]
hop_length = inputs[2]
win_length = inputs[3]
window = inputs[4]
normalized = inputs[5]
onesided = inputs[6]
return _op.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
def unbind(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
return unbind(data, axis)
def shape_as_tensor(self, inputs, input_types):
is_symbolic_shape = False
input_shape = self.infer_shape(inputs[0], self.prelude.mod)
for axis in input_shape:
if not isinstance(axis, (int, tvm.tir.IntImm)):
is_symbolic_shape = True
break
if is_symbolic_shape:
ret = _op.shape_of(inputs[0], dtype="int64")
else:
ret = _expr.const(np.array(input_shape), dtype="int64")
return ret
def logical_and(self, inputs, input_types):
lhs = _op.cast(inputs[0], "bool")
rhs = _op.cast(inputs[1], "bool")
return _op.logical_and(lhs, rhs)
def nonzero(self, inputs, input_types, is_numpy_style=False):
data = inputs[0]
ret = _op.transform.argwhere(data)
if is_numpy_style or (len(inputs) > 1 and inputs[1]):
return unbind(ret, 1)
return ret
def nonzero_numpy(self, inputs, input_types):
return self.nonzero(inputs, input_types, is_numpy_style=False)
def scatter(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
index = inputs[2]
src = inputs[3]
return _op.transform.scatter(data, index, src, axis)
def index_put(self, inputs, input_types):
in_tensor = inputs[0]
indices = inputs[1]
values = inputs[2]
accumulate = inputs[3]
if not accumulate:
mode = "update"
else:
mode = "add"
# Combine array of index tensors into one index tensor with shape (N,_)
index_tensor = _op.stack(indices, axis=0)
return _op.transform.scatter_nd(in_tensor, index_tensor, values, mode)
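# Illustrative note: aten::index_put with a list of index tensors behaves like
# numpy-style fancy-index assignment; stacking the per-dimension index tensors
# along axis 0 yields the (ndim, N) index tensor that scatter_nd expects.
# For a 2D tensor, the equivalent PyTorch calls are:
#
#   import torch
#   x = torch.zeros(3, 3)
#   rows, cols = torch.tensor([0, 2]), torch.tensor([1, 0])
#   x.index_put_((rows, cols), torch.tensor([5.0, 7.0]))                   # mode "update"
#   x.index_put_((rows, cols), torch.tensor([1.0, 1.0]), accumulate=True)  # mode "add"
#
# where (rows, cols) corresponds to `indices` and the update tensor to `values`.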
def scalar_tensor(self, inputs, input_types):
data = inputs[0]
cast_map = {
6: "float32",
7: "float64",
3: "int32",
4: "int64",
}
type_key = inputs[1]
if isinstance(data, _expr.Constant):
data = data.data.numpy().tolist()
return _expr.const(data, cast_map[type_key])
def interpolate(self, inputs, input_types):
if isinstance(inputs[1], _expr.Expr):
out_size = inputs[1]
elif isinstance(inputs[1], list):
out_size = []
for i in [0, 1]:
size, _ = try_infer_value(
inputs[1][i],
lambda ret: ret.astype(int),
lambda: _op.expand_dims(inputs[1][i], axis=0),
)
out_size.append(size)
out_size = _op.concatenate(out_size, axis=0)
data = inputs[0]
align_corners = inputs[4]
method = inputs[3]
if method.startswith("nearest"):
method = "nearest_neighbor"
elif method[0:2] == "bi":
method = method[2:]
if method == "nearest_neighbor":
coord_trans = "asymmetric"
elif align_corners:
coord_trans = "align_corners"
else:
coord_trans = "half_pixel"
return _op.image.resize2d(
data, out_size, None, "NCHW", method, coord_trans, cubic_alpha=-0.75
)
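# Summary of the coordinate transform chosen above (no new behaviour):
#   "nearest"                                 -> method "nearest_neighbor", "asymmetric"
#   "bilinear"/"bicubic", align_corners=True  -> "align_corners"
#   "bilinear"/"bicubic", align_corners=False -> "half_pixel"
# For example, a call reaching this converter as
#   torch.nn.functional.interpolate(x, size=(32, 32), mode="bilinear", align_corners=False)
# is lowered to resize2d with method "linear" and the "half_pixel" coordinate transform.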
def numel(self, inputs, input_types):
return _op.ndarray_size(inputs[0])
def empty(self, inputs, input_types):
shape = inputs[0]
return _op.zeros(shape, _convert_dtype_value(inputs[1]))
def bincount(self, inputs, input_types):
data = inputs[0]
weights = inputs[1]
input_type = self.infer_type(data).dtype
if input_type == "int64":
logger.warning(
"Casting an int64 input to int32, since we do not have int64 atomic add"
"needed for bincount yet."
)
data = _op.cast(data, "int32")
maximum = _op.max(data)
dim = maximum + _expr.const(1, dtype="int32")
if weights:
weight_type = self.infer_type(weights)
out_dtype = weight_type.dtype
updates = weights
else:
out_dtype = "int32"
updates = _op.ones_like(data)
counts = _op.zeros(_op.reshape(dim, [1]), out_dtype)
out = _op.scatter_add(counts, data, updates, axis=0)
if input_type == "int32":
# Torch always outputs int64 results for bincount
return _op.cast(out, "int64")
return out
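# Illustrative sketch of the emulation above: torch.bincount is equivalent to
# scatter-adding ones (or the optional weights) into a zero-initialised
# histogram of length max(data) + 1:
#
#   import torch
#   data = torch.tensor([1, 3, 1, 0])
#   hist = torch.zeros(int(data.max()) + 1, dtype=torch.int64)
#   hist.scatter_add_(0, data, torch.ones_like(data))
#   assert torch.equal(hist, torch.bincount(data))
#
# The int64 -> int32 cast above only works around the missing int64 atomic add;
# the result is cast back to int64 to match torch.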
def scatter_add(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
index = inputs[2]
src = inputs[3]
return _op.scatter_add(data, index, src, axis=axis)
def cumsum(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
dtype = inputs[2]
if inputs[2] is not None:
dtype = _convert_dtype_value(inputs[2])
return _op.cumsum(data, axis=dim, dtype=dtype)
def masked_fill(self, inputs, input_types):
mask = inputs[1]
value = _op.cast(_wrap_const(inputs[2]), input_types[0])
return _op.where(mask, value, inputs[0])
def masked_select(self, inputs, input_types):
mask = inputs[1]
indices = self.nonzero([mask], input_types, is_numpy_style=True)
return _op.adv_index([inputs[0]] + [indices[i] for i in range(indices.size)])
def sort(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
is_descending = inputs[2]
# pytorch sort returns both sorted indices and values
indices = _op.argsort(data, dim, not is_descending)
return _op.gather(data, dim, indices), indices
def argsort(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
is_descending = inputs[2]
return _op.argsort(data, dim, not is_descending)
def is_floating_point(self, inputs, input_types):
assert len(inputs) == 1
if isinstance(inputs[0], _expr.Expr):
input_type = self.infer_type(inputs[0]).dtype
else:
input_type = input_types[0]
is_float = input_type in ["float32", "float64", "float16", "bfloat16"]
return _expr.const(is_float)
def unique(self, inputs, input_types):
assert len(inputs) == 4
[data, is_sorted, return_inverse, return_counts] = inputs
if not is_sorted:
logger.warning("TVM always assumes sorted=True for torch.unique")
is_sorted = True
if return_counts:
[unique, indices, inverse_indices, num_uniq, counts] = _op.unique(
data, is_sorted=is_sorted, return_counts=True
)
unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
counts_sliced = _op.strided_slice(counts, begin=[0], end=num_uniq, slice_mode="size")
return (unique_sliced, inverse_indices, counts_sliced)
else:
[unique, indices, inverse_indices, num_uniq] = _op.unique(
data, is_sorted=is_sorted, return_counts=False
)
unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
return (unique_sliced, inverse_indices)
def nll_loss(self, inputs, input_types):
assert len(inputs) == 5
[predictions, targets, weights, reduction, ignore_index] = inputs
num_class = self.infer_shape(predictions)[1]
if reduction == 0:
reduction = "none"
elif reduction == 1:
reduction = "mean"
else:
reduction = "sum"
if weights is None:
weights = _op.full(_expr.const(1), (num_class,), dtype=input_types[0])
return _op.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
def flip(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
return _op.transform.reverse(data, axis=axis[0])
def bidir_gru_cell(
self,
input_seqs,
weights_dicts,
):
"""
Bidirectional GRU cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t = gru_cell(
input_seqs,
**weights_dicts[0],
)
reverse_outputs, rev_H_t = gru_cell(
input_seqs,
**weights_dicts[1],
backwards=True,
)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)
)
return final_outputs, _op.stack([fw_H_t, rev_H_t], axis=0)
def gru_layers(self, input_data, layer_weights_dicts, bidirectional, dropout_p=0.0):
"""
Iterates over the layers of a stacked GRU.
"""
layers_num = len(layer_weights_dicts)
# split input sequence to samples set
input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]
output_hiddens = []
for i in range(layers_num):
weights_dicts = layer_weights_dicts[i]
# input_seqs shape = [seq_num, (batch, feature_size)] or
# [seq_num, (batch, 2*feature_size)] for bidirectional
if bidirectional:
input_seqs, H_t = self.bidir_gru_cell(input_seqs, weights_dicts)
else:
input_seqs, H_t = gru_cell(input_seqs, **weights_dicts[0])
output_hiddens.append(H_t)
# TODO (vvchernov): in pytorch implementation train is also checked
# see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339
# /aten/src/ATen/native/RNN.cpp#L1054
if dropout_p != 0 and i < layers_num - 1:
# for input in input_seqs:
# input = _op.dropout(input, dropout_p)
raise NotImplementedError("Dropout for GRU has not been supported yet!")
return _op.stack(input_seqs, 0), _op.stack(output_hiddens, 0)
def gru(self, inputs, input_types):
"""
Description of GRU in pytorch:
https://pytorch.org/docs/stable/generated/torch.nn.GRU.html?highlight=gru#torch.nn.GRU
"""
# TODO (vvchernov): support dropout
assert len(inputs) == 9, "Input of size 9 is expected"
# Unpack inputs, note that if optional and not provided then value will be None.
_X = inputs[0]
# _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)
hidden_state = inputs[1]
# Hidden state shape (hidden_layers_num, batch, hidden_size)
_weights = inputs[2]
# Wi layer[0] shape (3 * hidden_size, feature_size)
# Wh layer[0] shape (3 * hidden_size, hidden_size)
# Bi layer[0] shape (3 * hidden_size)
# Bh layer[0] shape (3 * hidden_size)
# Wi layer[>0] shape (3 * hidden_size, hidden_size * num_directions)
# Wh layer[>0] shape (3 * hidden_size, hidden_size)
# Bi layer[>0] shape (3 * hidden_size)
# Bh layer[>0] shape (3 * hidden_size)
# Scalar inputs
has_biases = inputs[3]
num_layers = inputs[4]
dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout
# train = inputs[6]
bidirectional = inputs[7]
batch_first = inputs[8]
num_directions = 1
if bidirectional:
num_directions = 2
rsd = len(_weights) % num_layers
assert rsd == 0, "The number of weights must be a multiple of the number of layers!"
rsd = (len(_weights) / num_layers) % num_directions
assert (
rsd == 0
), "The number of weights in layer must be a multiple of the number of directions!"
weights_num = int(len(_weights) / num_layers / num_directions)
if has_biases:
assert weights_num == 4, "The number of weights per layer is expected to be 4"
else:
assert weights_num == 2, "The number of weights per layer is expected to be 2"
X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X
# TODO (vvchernov): Which data type should be used? from input or weights?
# Instead of it _infer_type(X).checked_type.dtype can be used
X_dtype = input_types[0]
X_shape = _infer_shape(X) # (seq_num, batch, feature_size)
hidden_size = int(_infer_shape(_weights[0])[0] / 3)
batch_size = X_shape[1]
# Initialize hidden states if not provided.
layers_h = []
hidden_layers_num = num_directions * num_layers
if hidden_state is None:
h_0 = _op.zeros((batch_size, hidden_size), X_dtype)
for i in range(hidden_layers_num):
layers_h.append(h_0)
else:
layers_h = unbind(hidden_state, 0)
layer_weights_dicts = []
k = 0 # layer counter
if has_biases:
names = ["hidden_state", "w_inp", "w_hid", "b_inp", "b_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of GRU weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 4]]
rev_weights_dict = dict(zip(names, rev_tensors))
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
layer_weights_dicts.append([fw_weights_dict])
k += 1
else:
names = ["hidden_state", "w_inp", "w_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of GRU weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 2]]
rev_weights_dict = dict(zip(names, rev_tensors))
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
layer_weights_dicts.append([fw_weights_dict])
k += 1
assert (
len(layer_weights_dicts) == num_layers and k == num_layers
), "For stacked GRU number of weights sets should be the same as number of layers!"
output, out_hidden_state = self.gru_layers(
X,
layer_weights_dicts,
bidirectional,
dropout_p=dropout_p,
)
# output shape = (seq_num, batch, hidden_size) or
# (seq_num, batch, 2*feature_size) for bidirectional
if batch_first:
output = _op.transpose(output, (1, 0, 2))
return (output, out_hidden_state)
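# Worked example of the weight bookkeeping above (illustrative only): for
# torch.nn.GRU(input_size=8, hidden_size=16, num_layers=2, bidirectional=True, bias=True)
# TorchScript passes a flat list of
#   num_layers * num_directions * weights_num = 2 * 2 * 4 = 16
# tensors, ordered per (layer, direction) as [w_ih, w_hh, b_ih, b_hh], where
# w_ih/w_hh have shape (3 * hidden_size, ...) and the biases (3 * hidden_size,).
# The loops above regroup them into one weights dict per direction per layer and
# pair each dict with its slice of the initial hidden state.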
def bidir_lstm_cell(
self,
input_seqs,
weights_dicts,
):
"""
Bidirectional LSTM cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t, fw_C_t = lstm_cell(
input_seqs,
**weights_dicts[0],
)
reverse_outputs, rev_H_t, rev_C_t = lstm_cell(
input_seqs,
**weights_dicts[1],
backwards=True,
)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)
)
return final_outputs, (fw_H_t, fw_C_t), (rev_H_t, rev_C_t)
def lstm_layers(self, input_data, layer_weights_dicts, bidirectional, dtype, dropout_p=0.0):
"""
Iterates over the layers of a stacked LSTM.
"""
layers_num = len(layer_weights_dicts)
# split input sequence to samples set
input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]
output_hiddens = []
for i in range(layers_num):
weights_dicts = layer_weights_dicts[i]
# input_seqs shape = [seq_num, (batch, feature_size)] or
# [seq_num, (batch, 2*feature_size)] for bidirectional
if bidirectional:
input_seqs, H_t, C_t = self.bidir_lstm_cell(input_seqs, weights_dicts)
else:
input_seqs, H_t, C_t = lstm_cell(input_seqs, **weights_dicts[0])
output_hiddens.append((H_t, C_t))
# TODO (vvchernov): in pytorch implementation train is also checked
# see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339
# /aten/src/ATen/native/RNN.cpp#L1054
if dropout_p != 0 and i < layers_num - 1:
# for input in input_seqs:
# input = _op.dropout(input, dropout_p)
raise NotImplementedError("Dropout for LSTM has not been supported yet!")
final_hiddens = []
if bidirectional:
for output_hidden in output_hiddens:
final_hiddens.append(output_hidden[0])
final_hiddens.append(output_hidden[1])
else:
final_hiddens = output_hiddens
return _op.stack(input_seqs, 0), final_hiddens
def lstm(self, inputs, input_types):
"""
Description of LSTM in pytorch: https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html
Native implementation for torch version less than 1.8.0 (projection is unsupported):
https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339/aten/ \
src/ATen/native/RNN.cpp#L1396
Native implementation for torch version from 1.8.0 and higher (projection is supported):
https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/RNN.cpp#L1483
"""
# TODO (vvchernov): support dropout
assert len(inputs) == 9, "Input of size 9 is expected"
# Unpack inputs, note that if optional and not provided then value will be None.
_X = inputs[0]
# _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)
hidden_states = inputs[1]
assert len(hidden_states) == 2, "lstm expects two hidden states"
h_0 = hidden_states[0]
c_0 = hidden_states[1]
# H0 shape (hidden_layers_num, batch, proj_size) if projection
# else (hidden_layers_num, batch, hidden_size)
# C0 shape (hidden_layers_num, batch, hidden_size)
_weights = inputs[2]
# If no projection
# Wi layer[0] shape (4 * hidden_size, feature_size)
# Wh layer[0] shape (4 * hidden_size, hidden_size)
# Bi layer[0] shape (4 * hidden_size)
# Bh layer[0] shape (4 * hidden_size)
# Wi layer[>0] shape (4 * hidden_size, hidden_size * num_directions)
# Wh layer[>0] shape (4 * hidden_size, hidden_size)
# Bi layer[>0] shape (4 * hidden_size)
# Bh layer[>0] shape (4 * hidden_size)
# If projection
# Wi layer[0] shape (4 * hidden_size, feature_size)
# Wh layer[0] shape (4 * hidden_size, proj_size)
# Bi layer[0] shape (4 * hidden_size)
# Bh layer[0] shape (4 * hidden_size)
# P layer[0] shape (proj_size, hidden_size)
# Wi layer[>0] shape (4 * hidden_size, proj_size * num_directions)
# Wh layer[>0] shape (4 * hidden_size, proj_size)
# Bi layer[>0] shape (4 * hidden_size)
# Bh layer[>0] shape (4 * hidden_size)
# P layer[>0] shape (proj_size, hidden_size)
# Scalar inputs
has_biases = inputs[3]
num_layers = inputs[4]
dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout
# train = inputs[6]
bidirectional = inputs[7]
batch_first = inputs[8]
num_directions = 1
if bidirectional:
num_directions = 2
rsd = len(_weights) % num_layers
assert rsd == 0, "The number of weights must be a multiple of the number of layers!"
rsd = (len(_weights) / num_layers) % num_directions
assert (
rsd == 0
), "The number of weights in layer must be a multiple of the number of directions!"
has_proj = False
proj_size = 0
weights_num = int(len(_weights) / num_layers / num_directions)
if has_biases:
if weights_num == 5:
has_proj = True
proj_size = _infer_shape(_weights[4])[0]
else:
assert weights_num == 4, "The number of weights per layer is expected to be 4"
else:
if weights_num == 3:
has_proj = True
proj_size = _infer_shape(_weights[2])[0]
else:
assert weights_num == 2, "The number of weights per layer is expected to be 2"
X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X
# TODO (vvchernov): Which data type should be used? from input or weights?
# Instead of it _infer_type(X).checked_type.dtype can be used
X_dtype = input_types[0]
X_shape = _infer_shape(X) # (seq_num, batch, feature_size)
hidden_size = int(_infer_shape(_weights[0])[0] / 4)
batch_size = X_shape[1]
# Initialize hidden states if not provided.
layers_h = []
layers_c = []
hidden_layers_num = num_directions * num_layers
if h_0 is None:
if has_proj:
h_0 = _op.zeros((batch_size, proj_size), X_dtype)
else:
h_0 = _op.zeros((batch_size, hidden_size), X_dtype)
for i in range(hidden_layers_num):
layers_h.append(h_0)
else:
layers_h = unbind(h_0, 0)
if c_0 is None:
c_0 = _op.zeros((batch_size, hidden_size), X_dtype)
for i in range(hidden_layers_num):
layers_c.append(c_0)
else:
layers_c = unbind(c_0, 0)
layer_weights_dicts = []
k = 0 # layer counter
if has_biases:
names = ["hidden_state", "cell_state", "w_inp", "w_hid", "b_inp", "b_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 4]
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 4]]
rev_weights_dict = dict(zip(names, rev_tensors))
if has_proj:
rev_weights_dict["proj"] = _weights[j + 4]
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 4]
layer_weights_dicts.append([fw_weights_dict])
k += 1
else:
names = ["hidden_state", "cell_state", "w_inp", "w_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 2]
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 2]]
rev_weights_dict = dict(zip(names, rev_tensors))
if has_proj:
rev_weights_dict["proj"] = _weights[j + 2]
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 2]
layer_weights_dicts.append([fw_weights_dict])
k += 1
assert (
len(layer_weights_dicts) == num_layers and k == num_layers
), "For stacked LSTM number of weights sets should be the same as number of layers!"
outputs = self.lstm_layers(
X,
layer_weights_dicts,
bidirectional,
dtype=X_dtype,
dropout_p=dropout_p,
)
# output shape = (seq_num, batch, hidden_size) or
# (seq_num, batch, 2*feature_size) for bidirectional
output = outputs[0]
hy = []
cy = []
for hidden in outputs[1]:
hy.append(hidden[0])
cy.append(hidden[1])
if batch_first:
output = _op.transpose(output, (1, 0, 2))
return (output, _op.stack(hy, 0), _op.stack(cy, 0))
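# Worked example of the projection detection above (illustrative only): for
# torch.nn.LSTM(input_size=8, hidden_size=16, proj_size=4, num_layers=1, bias=True)
# the flat weight list per direction is
#   [w_ih (64, 8), w_hh (64, 4), b_ih (64,), b_hh (64,), w_hr (4, 16)]
# i.e. weights_num == 5, so has_proj is set and proj_size is read from the first
# dimension of the projection matrix w_hr.  Without projection the list has
# 4 (with bias) or 2 (without bias) tensors per direction.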
def all_any_common(self, op, inputs, input_types):
dim = inputs[1]
keepdim = inputs[2]
if self.infer_type(inputs[0]).dtype != "bool":
# The input dtype can be uint8.
inp = _op.cast(inputs[0], "bool")
else:
inp = inputs[0]
return op(inp, axis=dim, keepdims=keepdim)
def searchsorted_common(
self, sorted_sequence, values, out_int32, right, side=None, out=None, sorter=None
):
assert side is None and out is None and sorter is None, "unsupported parameters"
dtype = "int32" if out_int32 else "int64"
values_shape = _infer_shape(values)
if len(values_shape) == 0:
values = _op.expand_dims(values, 0)
out = _op.searchsorted(sorted_sequence, values, right=right, dtype=dtype)
if len(values_shape) == 0:
return _op.squeeze(out)
return out
def searchsorted(self, inputs, input_types):
return self.searchsorted_common(*inputs)
def bucketize(self, inputs, input_types):
return self.searchsorted_common(inputs[1], inputs[0], inputs[2], inputs[3])
def roll(self, inputs, input_types):
def slide_axes(inp, shape, ax):
axes = list(range(len(shape)))
axes = axes[:ax] + [-1] + axes[ax:-1]
return _op.transpose(inp, axes)
x = inputs[0]
shifts = inputs[1]
dims = inputs[2]
shape = self.infer_shape(x)
start = _expr.const(0, "int64")
step = _expr.const(1, "int64")
out = x
for i, dim in enumerate(dims):
roll_dim = _expr.const(shape[dim], "int64")
indices_1d = _op.mod(
_op.transform.arange(start, roll_dim, step, "int64")
- _expr.const(shifts[i], "int64")
+ roll_dim,
roll_dim,
)
# First fill in the last axis with roll indices, and then do transpose to
# bring the roll indices into the desired axis.
indices = slide_axes(
_op.tile(indices_1d, shape[:dim] + shape[dim + 1 :] + (1,)),
shape,
dim,
)
out = _op.gather(out, dim, indices)
return out
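# Illustrative note on the index construction above: for a dimension of size n
# and shift s, element i of the rolled output comes from input position
# (i - s) mod n, so the gather indices along that axis are
#   indices_1d = (arange(n) - s + n) % n
# e.g. n = 5, s = 2 gives [3, 4, 0, 1, 2], matching
#   torch.roll(torch.arange(5), shifts=2)  ->  tensor([3, 4, 0, 1, 2])
# The tile/transpose steps only broadcast these 1-D indices to the full shape
# expected by _op.gather.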
def einsum(self, inputs, input_types):
equation, data = inputs
return _op.einsum(data, equation)
def dot(self, inputs, _):
lhs, rhs = inputs
return _op.sum(_op.multiply(lhs, rhs))
def mv(self, inputs, _):
lhs, rhs = inputs
# Convert the 1D matrix (vector) into a 2D matrix with the extra
# dimension=1
rhs_matrix = _op.transform.expand_dims(rhs, 0)
# Run multiplication
dense_result = _op.nn.dense(lhs, rhs_matrix, units=None)
# Chop off the extra result dimension
return _op.transform.squeeze(dense_result)
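# Illustrative note: nn.dense(A, B) computes A @ B.T, so a matrix-vector product
# can be expressed by treating the vector as a single-row matrix:
#   (n, k) @ (1, k).T -> (n, 1), then squeeze -> (n,)
# which matches torch.mv:
#   import torch
#   m, v = torch.randn(3, 4), torch.randn(4)
#   assert torch.allclose(torch.mv(m, v), (m @ v.reshape(1, -1).T).squeeze())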
def grid_sampler(self, inputs, input_types):
interpolate_mode = inputs[2]
padding_mode = inputs[3]
align_corners = inputs[4]
data_shape = self.infer_shape_with_prelude(inputs[0])
if len(data_shape) == 4:
layout = "NCHW"
axes = [0, 3, 1, 2]
grid = _op.transform.transpose(inputs[1], axes)
elif len(data_shape) == 5:
layout = "NCDHW"
axes = [0, 4, 1, 2, 3]
grid = _op.transform.transpose(inputs[1], axes)
else:
msg = f"only 4D and 5D are supported."
raise ValueError(msg)
if interpolate_mode == 0:
interpolate_str = "bilinear"
elif interpolate_mode == 1:
interpolate_str = "nearest"
elif interpolate_mode == 2:
interpolate_str = "bicubic"
else:
msg = f"interpolation method {interpolate_mode} is not supported"
raise ValueError(msg)
if padding_mode == 0:
padding_mode_str = "zeros"
elif padding_mode == 1:
padding_mode_str = "border"
elif padding_mode == 2:
padding_mode_str = "reflection"
else:
msg = f"padding_mode {padding_mode} is not supported"
raise ValueError(msg)
return _op.image.grid_sample(
inputs[0], grid, interpolate_str, layout, padding_mode_str, align_corners
)
# Operator mappings
def create_convert_map(self):
self.convert_map = {
"aten::is_floating_point": self.is_floating_point,
"aten::pixel_shuffle": self.pixel_shuffle,
"aten::device": self.none,
"prim::device": self.none,
"aten::sub": self.sub,
"aten::max": self.max,
"aten::min": self.min,
"aten::amax": self.max,
"aten::amin": self.min,
"aten::stft": self.stft,
"aten::mul": self.make_elemwise("multiply"),
"aten::pow": self.make_elemwise("power"),
"aten::arange": self.arange,
"aten::meshgrid": self.meshgrid,
"aten::div": self.make_elemwise("divide"),
"aten::floor_divide": self.make_elemwise("floor_divide"),
"aten::true_divide": self.make_elemwise("divide"),
"aten::fmod": self.make_elemwise("trunc_mod"),
"aten::remainder": self.make_elemwise("floor_mod"),
"aten::addcdiv": self.addcdiv,
"aten::addcmul": self.addcmul,
"aten::ones": self.ones,
"aten::ones_like": self.ones_like,
"aten::zeros": self.zeros,
"aten::zeros_like": self.zeros_like,
"aten::full": self.full,
"aten::full_like": self.full_like,
"aten::linspace": self.linspace,
"aten::reciprocal": self.reciprocal,
"aten::repeat": self.repeat,
"aten::repeat_interleave": self.repeat_interleave,
"aten::to": self.to,
"aten::squeeze": self.squeeze,
"aten::unsqueeze": self.unsqueeze,
"aten::cat": self.concatenate,
"aten::slice": self.slice,
"aten::narrow": self.narrow,
"aten::split": self.split,
"aten::split_with_sizes": self.split_with_sizes,
"aten::select": self.select,
"aten::take": self.take,
"aten::where": self.where,
"aten::topk": self.topk,
"aten::relu": self.relu,
"aten::prelu": self.prelu,
"aten::leaky_relu": self.leaky_relu,
"aten::elu": self.elu,
"aten::celu": self.celu,
"aten::gelu": self.gelu,
"aten::selu": self.selu,
"aten::silu": self.silu,
"aten::log_sigmoid": self.log_sigmoid,
"aten::adaptive_avg_pool1d": functools.partial(
self.adaptive_avg_pool, _op.nn.adaptive_avg_pool1d
),
"aten::adaptive_avg_pool2d": functools.partial(
self.adaptive_avg_pool, _op.nn.adaptive_avg_pool2d
),
"aten::adaptive_avg_pool3d": functools.partial(
self.adaptive_avg_pool, _op.nn.adaptive_avg_pool3d
),
"aten::adaptive_max_pool1d": functools.partial(
self.adaptive_max_pool, _op.nn.adaptive_max_pool1d
),
"aten::adaptive_max_pool2d": functools.partial(
self.adaptive_max_pool, _op.nn.adaptive_max_pool2d
),
"aten::adaptive_max_pool3d": functools.partial(
self.adaptive_max_pool, _op.nn.adaptive_max_pool3d
),
"aten::max_pool2d": self.maxpool_2d,
"aten::max_pool2d_with_indices": self.maxpool_2d_with_indices,
"aten::max_pool1d": self.maxpool_1d,
"aten::max_pool3d": self.maxpool_3d,
"aten::hardtanh": self.hardtanh,
"aten::_convolution": self.convolution,
"aten::softmax": self.softmax,
"aten::threshold": self.threshold,
"aten::contiguous": self.contiguous,
"aten::batch_norm": self.batch_norm,
"aten::instance_norm": self.instance_norm,
"aten::layer_norm": self.layer_norm,
"aten::group_norm": self.group_norm,
"aten::transpose": self.transpose,
"aten::t": self.transpose,
"aten::flatten": self.flatten,
"aten::addmm": self.addmm,
"aten::size": self.size,
"aten::view": self.view,
"aten::reshape": self.reshape,
"aten::clone": self.clone,
"aten::log_softmax": self.log_softmax,
"aten::sigmoid": self.sigmoid,
"aten::softplus": self.softplus,
"aten::avg_pool1d": self.make_avg_pool(1),
"aten::avg_pool2d": self.make_avg_pool(2),
"aten::avg_pool3d": self.make_avg_pool(3),
"aten::linear": self.linear,
"aten::dropout": self.dropout,
"aten::feature_dropout": self.dropout,
"aten::alpha_dropout": self.dropout,
"aten::mean": self.mean,
"aten::chunk": self.chunk,
"aten::unsafe_chunk": self.chunk,
"aten::matmul": self.matmul,
"aten::bmm": self.matmul,
"aten::expand": self.expand,
"aten::Int": self.int,
"prim::NumToTensor": self.numtotensor,
"prim::ImplicitTensorToNum": self.tensortonum,
"aten::ScalarImplicit": self.tensortonum,
"aten::constant_pad_nd": self.make_pad("constant"),
"aten::reflection_pad1d": self.make_pad("reflect"),
"aten::reflection_pad2d": self.make_pad("reflect"),
"aten::replication_pad1d": self.make_pad("edge"),
"aten::replication_pad2d": self.make_pad("edge"),
"aten::replication_pad3d": self.make_pad("edge"),
"aten::permute": self.transpose,
"aten::sum": self.make_reduce("sum"),
"aten::prod": self.make_reduce("prod"),
"aten::argmin": self.make_reduce("argmin"),
"aten::argmax": self.make_reduce("argmax"),
"aten::norm": self.norm,
"aten::frobenius_norm": self.frobenius_norm,
"aten::std": self.std,
"aten::var": self.variance,
"aten::var_mean": self.var_mean,
"aten::abs": self.make_unary("abs"),
"aten::neg": self.make_unary("negative"),
"aten::cos": self.make_unary("cos"),
"aten::cosh": self.make_unary("cosh"),
"aten::sin": self.make_unary("sin"),
"aten::sinh": self.make_unary("sinh"),
"aten::tan": self.make_unary("tan"),
"aten::tanh": self.make_unary("tanh"),
"aten::acos": self.make_unary("acos"),
"aten::asin": self.make_unary("asin"),
"aten::atan": self.make_unary("atan"),
"aten::log": self.make_unary("log"),
"aten::log2": self.make_unary("log2"),
"aten::log10": self.make_unary("log10"),
"aten::log1p": self.log1p,
"aten::exp": self.make_unary("exp"),
"aten::erf": self.make_unary("erf"),
"aten::trunc": self.make_unary("trunc"),
"aten::sign": self.make_unary("sign"),
"aten::sqrt": self.make_unary("sqrt"),
"aten::rsqrt": self.make_unary("rsqrt"),
"aten::square": self.square,
"aten::ceil": self.make_unary("ceil"),
"aten::floor": self.make_unary("floor"),
"aten::round": self.make_unary("round"),
"aten::isfinite": self.make_unary("isfinite"),
"aten::isinf": self.make_unary("isinf"),
"aten::isnan": self.make_unary("isnan"),
"aten::clamp": self.clamp,
"aten::clamp_min": self.clamp_min,
"aten::clamp_max": self.clamp_max,
"aten::detach": self.identity,
"aten::upsample_bilinear2d": self.make_upsample("linear"),
"aten::upsample_bicubic2d": self.make_upsample("cubic"),
"aten::upsample_nearest2d": self.make_upsample("nearest_neighbor"),
"aten::upsample_trilinear3d": self.make_upsample3d("linear"),
"aten::upsample_nearest3d": self.make_upsample3d("nearest_neighbor"),
"aten::expand_as": self.expand_as,
"aten::lt": self.make_elemwise("less"),
"aten::gt": self.make_elemwise("greater"),
"aten::le": self.make_elemwise("less_equal"),
"aten::ge": self.make_elemwise("greater_equal"),
"aten::ne": self.make_elemwise("not_equal"),
"aten::eq": self.make_elemwise("equal"),
"aten::logical_not": self.logical_not,
"aten::logical_xor": self.logical_xor,
"aten::bitwise_not": self.bitwise_not,
"aten::bitwise_xor": self.bitwise_xor,
"aten::Bool": self.Bool,
"aten::Float": self.Float,
"aten::rsub": self.rsub,
"aten::embedding": self.embedding,
"aten::one_hot": self.one_hot,
"aten::mm": self.matmul,
"aten::add": self.add,
"aten::stack": self.stack,
"aten::__getitem__": self.list_getitem,
"aten::len": self.list_len,
"aten::type_as": self.type_as,
"aten::gather": self.gather,
"aten::index_select": self.select,
"aten::index": self.index,
"torchvision::nms": self.nms,
"aten::logsumexp": self.logsumexp,
"torchvision::roi_align": self.roi_align,
"torchvision::deform_conv2d": self.deform_conv2d,
"aten::unbind": self.unbind,
"aten::__and__": self.logical_and,
"aten::logical_and": self.logical_and,
"aten::_shape_as_tensor": self.shape_as_tensor,
"aten::nonzero": self.nonzero,
"aten::nonzero_numpy": self.nonzero_numpy,
"aten::scatter": self.scatter,
"aten::index_put": self.index_put,
"aten::scalar_tensor": self.scalar_tensor,
"aten::__interpolate": self.interpolate,
"aten::IntImplicit": self.identity,
"aten::tensor": self.identity, # used for example in tensor(1.0)
"aten::numel": self.numel,
"aten::empty": self.empty,
"aten::bincount": self.bincount,
"aten::scatter_add": self.scatter_add,
"aten::__not__": self.logical_not,
"aten::hardswish": self.hard_swish,
"aten::hardsigmoid": self.hard_sigmoid,
"aten::cumsum": self.cumsum,
"aten::masked_fill": self.masked_fill,
"aten::masked_select": self.masked_select,
"aten::argsort": self.argsort,
"aten::sort": self.sort,
"aten::_unique2": self.unique,
"aten::nll_loss": self.nll_loss,
"aten::nll_loss2d": self.nll_loss,
"aten::nll_loss_nd": self.nll_loss,
"aten::flip": self.flip,
"aten::gru": self.gru,
"aten::lstm": self.lstm,
"aten::all": functools.partial(self.all_any_common, _op.all),
"aten::any": functools.partial(self.all_any_common, _op.any),
"aten::searchsorted": self.searchsorted,
"aten::bucketize": self.bucketize,
"aten::roll": self.roll,
"aten::einsum": self.einsum,
"aten::dot": self.dot,
"aten::mv": self.mv,
"aten::grid_sampler": self.grid_sampler,
"aten::__ior__": self.make_elemwise("bitwise_or"),
"aten::__iand__": self.make_elemwise("bitwise_and"),
"aten::__ixor__": self.make_elemwise("bitwise_xor"),
"aten::__lshift__": self.make_elemwise("left_shift"),
"aten::__rshift__": self.make_elemwise("right_shift"),
}
def update_convert_map(self, custom_map):
self.convert_map.update(custom_map)
def report_missing_conversion(self, op_names):
"""Check if all ops in an input graph are supported by TVM"""
known_ops = [
"prim::Constant",
"prim::GetAttr",
"prim::ListConstruct",
"prim::ListUnpack",
"prim::TupleConstruct",
"prim::TupleUnpack",
"prim::RaiseException",
"prim::If",
"prim::Loop",
]
known_ops += list(self.convert_map.keys())
known_ops += list(qnn_torch.convert_map.keys())
missing = []
for op_name in op_names:
# Also take care of in-place variant ops like aten::relu_
if op_name not in known_ops and not (
op_name.endswith("_") and op_name[:-1] in known_ops
):
missing.append(op_name)
if missing:
msg = "The following operators are not implemented: {}".format(missing)
raise NotImplementedError(msg)
def convert_block(self, block, outputs):
"""Translate Torch "Block", used for prim::If and prim::Loop"""
ops = _get_operator_nodes(block.nodes())
ret_names = _get_input_names(block.returnNode())
return self.convert_operators(ops, outputs, ret_names)
def convert_if(self, if_node, outputs):
"""Translate Torch prim::If to Relay If"""
cond = outputs[if_node.inputsAt(0).debugName()]
blocks = list(if_node.blocks())
true_branch = self.convert_block(blocks[0], outputs)
false_branch = self.convert_block(blocks[1], outputs)
assert len(true_branch) == 1 and len(false_branch) == 1
return _expr.If(cond, true_branch[0], false_branch[0])
def convert_loop(self, loop_node, outputs):
"""Translate Torch prim::Loop to Relay while_loop"""
def get_input(index):
ivalue = loop_node.inputsAt(index)
inode = ivalue.node()
if inode.kind() == "prim::Constant":
return _expr.const(_get_constant(inode))
var_name = ivalue.debugName()
assert var_name in outputs
return _wrap_const(outputs[var_name])
# Refer to the spec for prim::Loop below
# https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/OVERVIEW.md#loops
# The first input: %max_trip_count
# The second input: %initial_condition
# The rest of input: loop variables
max_loop_count = get_input(0)
init_cond = get_input(1)
num_loop_var = len(list(loop_node.inputs())) - 2
init_vals = [get_input(i + 2) for i in range(num_loop_var)]
# A while loop always has max_loop_count set to the int64 max
# max_loop_count.data (tvm.runtime.NDArray) is -1, so _get_constant again
is_while_loop = (
isinstance(max_loop_count, _expr.Constant)
and _get_constant(loop_node.inputsAt(0).node()) == sys.maxsize
)
if is_while_loop:
loop_iter_dtype = "bool"
# For a while loop whose condition does not depend on the inputs, such as `while i < 10`,
# init_cond is an int and needs a cast to bool to type check
if isinstance(init_cond, _expr.Constant):
init_cond = _op.cast(init_cond, "bool")
init_loop_iter_val = init_cond
else:
loop_iter_dtype = "int32"
# always count from 0
init_loop_iter_val = _expr.const(0, dtype="int32")
body_block = list(loop_node.blocks())[0]
block_input_names = _get_input_names(body_block)
num_block_inputs = len(block_input_names)
name_val_pairs = list(zip(block_input_names, [init_loop_iter_val] + init_vals))
outputs.update(name_val_pairs)
def get_var(name, val):
if val:
checked_type = self.infer_type_with_prelude(val)
if hasattr(checked_type, "shape"):
shape = get_const_tuple(checked_type.shape)
actual_shape = []
for dim in shape:
if isinstance(dim, int) and dim == 0:
actual_shape.append(Any())
else:
actual_shape.append(dim)
return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)
else:
return _expr.var(name, type_annotation=checked_type)
return _expr.var(name)
loop_iter_var = _expr.var(block_input_names[0], shape=(), dtype=loop_iter_dtype)
loop_vars = [get_var(name, val) for name, val in name_val_pairs[1:]]
# Add non constant free variables to loop variables to prevent code blow up
# Without this, if there are two for loops in a row, which often happens
# if the outer loop is unrolled, the computation corresponding to the first for loop
# is inlined inside loop body, turning O(N) + O(N) computation into O(N^2).
# This issue was found when converting the Stacked LSTM test. Torch does not add the
# output of the earlier loop to the loop variables of the next loop.
# So the variable corresponding to the first loop output appears free in the second
# loop body.
free_vars = [
var
for var in _get_free_vars_from_block(body_block)
if var in outputs
and not isinstance(outputs[var], (_expr.Constant, int, float, str))
and outputs[var]
]
prev_outputs = {}
for name in free_vars:
prev_output = outputs[name]
new_loop_var = get_var(name, prev_output)
prev_outputs[name] = prev_output
outputs[name] = new_loop_var
loop_vars.append(new_loop_var)
init_vals.append(prev_output)
def cond(*current_vals):
i = current_vals[0]
if is_while_loop:
return _op.equal(i, _expr.const(True, "bool"))
return _op.less(i, max_loop_count)
def body(*current_vals):
# Update loop variables using the prev iteration outputs
assert len(current_vals) == num_block_inputs + len(free_vars)
for (i, val) in enumerate(current_vals):
if i < num_block_inputs:
outputs[block_input_names[i]] = val
else:
outputs[free_vars[i - num_block_inputs]] = val
block_outputs = self.convert_block(body_block, outputs)
block_outputs += [outputs[name] for name in free_vars]
if not is_while_loop:
# iter var increment implicit in torch, so do it manually
# for while loop, block_outputs[0] is already a boolean,
# the result of termination check
incr = _expr.const(1, dtype="int32")
block_outputs[0] = current_vals[0] + incr
return block_outputs
loop = while_loop(cond, [loop_iter_var] + loop_vars, body)
loop_val = loop(init_loop_iter_val, *init_vals)
# restore original output values for free vars
outputs.update(prev_outputs)
# The first element is a loop counter or boolean condition, ignore it
return [_expr.TupleGetItem(loop_val, i + 1) for i in range(num_loop_var)]
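# Illustrative sketch of the mapping above: a TorchScript loop
#   %y_1, ... = prim::Loop(%max_trip_count, %initial_condition, %y_init, ...)
# covers both `for i in range(n)` (condition constantly true, trip count n) and
# `while cond:` (trip count set to the int64 max).  The converter threads an
# explicit iteration variable through Relay's while_loop: for a for-loop it is
# an int32 counter incremented in the body and compared against max_loop_count,
# for a while-loop it is the boolean condition recomputed by the body.  The free
# variables appended to loop_vars keep the outputs of earlier loops from being
# re-inlined into the body of the next loop.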
def convert_operators(self, operators, outputs, ret_names):
"""Convert each Torch IR operators to Relay equivalent"""
for node_name, op_node in operators:
operator = op_node.kind()
inputs = _get_op_inputs(op_node, outputs)
if operator == "prim::Constant":
outputs[node_name] = _get_constant(op_node)
elif operator == "prim::ListConstruct" and _should_construct_dynamic_list(op_node):
outputs[node_name] = self.convert_to_list_adt(inputs)
elif operator == "prim::ListConstruct":
# This assumes that no more elements will be appended to this list
# In this case, we keep the Python list
outputs[node_name] = inputs
elif operator == "prim::TupleConstruct":
def _handle_nested_input(inputs):
inputs_list = []
for i, _ in enumerate(inputs):
if isinstance(inputs[i], list):
inputs_list.append(_handle_nested_input(inputs[i]))
else:
assert isinstance(inputs[i], _expr.Expr)
inputs_list.append(inputs[i])
return _expr.Tuple(inputs_list)
outputs[node_name] = _handle_nested_input(inputs)
elif operator in ["prim::ListUnpack", "prim::TupleUnpack"]:
assert len(inputs) == 1
if isinstance(inputs[0], (list, _expr.TupleWrapper)):
unpacked = inputs[0]
else:
unpacked = _unpack_tuple(inputs[0])
outputs.update(zip(_get_output_names(op_node), unpacked))
elif operator == "prim::prim::RaiseException":
logger.warning("raising exceptions is ignored")
outputs[node_name] = None
elif operator == "prim::If":
if_out = self.convert_if(op_node, outputs)
outputs[node_name] = if_out
elif operator == "prim::Loop":
loop_out = self.convert_loop(op_node, outputs)
unpacked_names = _get_output_names(op_node)
assert len(loop_out) == len(unpacked_names)
outputs.update(zip(unpacked_names, loop_out))
else:
if operator not in self.convert_map:
# At this point, the only possible ops that are not in convert_map are
# in-place variant of ops like aten::relu_
assert operator.endswith("_")
logger.warning(
"An in-place op %s found, the result will not be correct "
"if the model depends on side-effects by this op.",
operator,
)
relay_op = self.convert_map[operator[:-1]]
else:
relay_op = self.convert_map[operator]
relay_out = relay_op(
inputs, _get_input_types(op_node, outputs, default_dtype=self.default_dtype)
)
self.record_output_type(relay_out)
if isinstance(relay_out, tuple):
# This is for torch operators that return multiple outputs
# See _adaptive_max_2d above for example
out_names = _get_output_names(op_node)
outputs.update(zip(out_names, relay_out))
else:
assert op_node.outputsSize() == 1
outputs[node_name] = relay_out
return [_wrap_const(outputs[ret_name]) for ret_name in ret_names]
def _pytorch_result_type(dtypes, non_tensor_inputs):
"""This promotes TVM dtypes like PyTorch would"""
import torch
dtype_map = {
"float64": torch.float64,
"float32": torch.float32,
"float16": torch.float16,
"bfloat16": torch.bfloat16,
"int64": torch.int64,
"int32": torch.int32,
"int16": torch.int16,
"int8": torch.int8,
"uint8": torch.uint8,
"bool": torch.bool,
}
if len(dtypes) > 0:
result_type = dtypes[0]
for dt in dtypes[1:]:
if dt != result_type: # we don't want to work with same types as we
# don't do quantized here (which cannot be promoted?)
result_type = _convert_data_type(
str(
torch.result_type(
torch.zeros((), dtype=dtype_map[result_type]),
torch.zeros((), dtype=dtype_map[dt]),
)
)
)
else:
result_type = "bool" # this is the smallest type...
for inp in non_tensor_inputs:
result_type = _convert_data_type(
str(torch.result_type(torch.zeros((), dtype=dtype_map[result_type]), inp))
)
return result_type
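# Illustrative sketch of the promotion rule above (assuming torch is importable):
# pairwise torch.result_type over zero-dim tensors reproduces PyTorch's type
# promotion lattice, e.g.
#   import torch
#   torch.result_type(torch.zeros((), dtype=torch.int32),
#                     torch.zeros((), dtype=torch.float16))     # -> torch.float16
#   torch.result_type(torch.zeros((), dtype=torch.float32), 1)  # -> torch.float32
# Non-tensor Python scalars are folded in afterwards, so an int scalar does not
# widen a float16 tensor result.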
# Helper functions for operator implementation
def _convert_dtype_value(val):
"""converts a PyTorch the PyTorch numeric type id to a torch scalar type."""
convert_torch_dtype_map = {
7: "torch.float64",
6: "torch.float32",
5: "torch.float16",
4: "torch.int64",
3: "torch.int32",
2: "torch.int16",
1: "torch.int8",
0: "torch.unit8",
None: "torch.int64",
} # Default is torch.int64
if val in convert_torch_dtype_map:
return _convert_data_type(convert_torch_dtype_map[val])
else:
msg = "Torch data type value %d is not handled yet." % (val)
raise NotImplementedError(msg)
def _convert_data_type(input_type, default_dtype=None):
"""converts the PyTorch scalar type input_type to a TVM dtype.
optionally, default_dtype can be a TVM dtype that is used
if input_type is None (but not when it is unknown)"""
if input_type is None and default_dtype is not None:
return default_dtype
input_type = input_type.lower()
if input_type in ["double", "float64", "torch.float64"]:
return "float64"
elif input_type in ["float", "float32", "torch.float32"]:
return "float32"
elif input_type in ["half", "float16", "torch.float16"]:
return "float16"
elif input_type in ["long", "int64", "torch.int64"]:
return "int64"
elif input_type in ["int", "int32", "torch.int32"]:
return "int32"
elif input_type in ["short", "int16", "torch.int16"]:
return "int16"
elif input_type in ["char", "int8", "torch.int8"]:
return "int8"
elif input_type in ["byte", "uint8", "torch.uint8"]:
return "uint8"
elif input_type in ["quint8", "torch.quint8"]:
return "quint8"
elif input_type in ["qint8", "torch.qint8"]:
return "qint8"
elif input_type in ["qint32", "torch.qint32"]:
return "qint32"
elif input_type in ["bool", "torch.bool"]:
return "bool"
elif input_type in ["str"]:
return "str"
else:
raise NotImplementedError("input_type {} is not handled yet".format(input_type))
return "float32" # Never reached
def _create_typed_const(data, dtype):
"""create a (scalar) constant of given value and dtype.
dtype should be a TVM dtype"""
if dtype == "float64":
typed_data = _expr.const(np.float64(data), dtype=dtype)
elif dtype == "float32":
typed_data = _expr.const(np.float32(data), dtype=dtype)
elif dtype == "float16":
typed_data = _expr.const(np.float16(data), dtype=dtype)
elif dtype == "int64":
typed_data = _expr.const(np.int64(data), dtype=dtype)
elif dtype == "int32":
typed_data = _expr.const(np.int32(data), dtype=dtype)
elif dtype == "int16":
typed_data = _expr.const(np.int16(data), dtype=dtype)
elif dtype == "int8":
typed_data = _expr.const(np.int8(data), dtype=dtype)
elif dtype == "uint8":
typed_data = _expr.const(np.uint8(data), dtype=dtype)
else:
raise NotImplementedError("input_type {} is not handled yet".format(dtype))
return typed_data
def _wrap_const(c):
if not isinstance(c, (_expr.Expr, list, tvm.tir.expr.Any)):
return _expr.const(c)
return c
def _run_jit_passes(graph, enable_lower_all_tuples=True):
"""The inline pass is necessary to unwrap prim::CallMethod"""
# pylint: disable=c-extension-no-member
import torch
if is_version_greater_than("1.5.1"):
# This is required for torchvision detection models from 1.6 above
# It is the same as _jit_pass_inline, except that it has some special
# case behaviors for some ops such as aten::__interpolate()
torch._C._jit_pass_onnx_function_substitution(graph)
else:
torch._C._jit_pass_inline(graph)
if enable_lower_all_tuples:
torch._C._jit_pass_lower_all_tuples(graph)
def _get_tensor_and_var(torch_tensor, name):
tensor = tvm.nd.array(torch_tensor.cpu().numpy())
var = _expr.var(name, shape=tensor.shape, dtype=tensor.dtype)
return tensor, var
def _get_output_name(node):
assert node.outputsSize() == 1
return node.output().debugName()
def _get_output_names(node):
return [output.debugName() for output in node.outputs()]
def _get_input_names(node_or_graph):
return [inp.debugName() for inp in node_or_graph.inputs()]
def _get_op_inputs(op_node, outputs):
return [outputs[name] for name in _get_input_names(op_node)]
def _get_node_type(node):
assert node.outputsSize() == 1
return node.output().type().kind()
def _get_uses(node):
uses = []
for output in node.outputs():
uses += output.uses()
return uses
def _get_users(node):
return [use.user for use in _get_uses(node)]
def _getattr_full_name(getattrs, sep="."):
return sep.join([getattr_attr_name(node) for node in getattrs])
def _get_pytorch_value_type(typ, default_dtype="float32"):
kind = typ.kind()
if kind == "TensorType":
if typ.scalarType() is None:
# Tensor's type can be unknown if we use torch.jit.script(...)
# A default can be passed in; if not, float32 is used
logger.warning("Untyped Tensor found, assume it is %s", default_dtype)
return default_dtype
else:
return _convert_data_type(typ.scalarType())
elif kind == "ListType":
return "ListType"
elif kind in ["IntType", "FloatType", "BoolType", "StringType", "OptionalType"]:
pt_dtype = str(typ).lower()
dtype = pt_dtype if pt_dtype == "OptionalType" else _convert_data_type(pt_dtype)
return dtype
else:
return "UnsupportedType"
def _get_input_types(op_node, outputs, default_dtype="float32"):
"""Returns a TVM dtype for each input nodes derived from the torch type"""
in_types = []
for inp in op_node.inputs():
if inp.node().kind() == "prim::GetAttr":
# GetAttr nodes always return None when we call scalarType() on it
name = inp.debugName()
assert name in outputs
if isinstance(outputs[name], _expr.Var):
in_types.append(outputs[name].type_annotation.dtype)
else:
# For quantized modules with parameters, here we would get
# "prim::GetAttr[name="_packed_params"]". Since the dtype corresponding to
# _packed_params is not needed by quantized ops, we return an arbitrary type.
in_types.append(default_dtype)
else:
in_types.append(_get_pytorch_value_type(inp.type(), default_dtype=default_dtype))
return in_types
def _get_constant(node):
"""Retrieve a constant associated with this prim::Constant node"""
attribute_names = node.attributeNames()
num_attributes = len(attribute_names)
if num_attributes == 1:
attr_name = attribute_names[0]
ty = node.output().type().kind()
if ty == "IntType":
return node.i(attr_name)
elif ty == "BoolType":
return bool(node.i(attr_name))
elif ty in ["FloatType", "LongType"]:
return node.f(attr_name)
elif ty in ["TensorType", "CompleteTensorType"]:
tensor = node.t(attr_name)
if tensor.is_cuda:
tensor = tensor.cpu()
if len(tensor.shape) == 0: # tensor(0.1)
# TODO(t-vi): When is this needed?
return tensor.item()
return _wrap_const(tensor.numpy())
elif ty in ["DeviceObjType", "StringType"]:
return node.s(attr_name)
elif ty == "FunctionType":
return None
else:
raise NotImplementedError("Unsupported type: %s" % ty)
else:
assert num_attributes == 0
return None
def _get_operator_nodes(nodes):
"""Returns torch IR nodes that need conversion to Relay"""
ops = []
# Traverse nodes and add to graph
for node in nodes:
if node.outputsSize() == 0:
continue
if node.outputsSize() > 1:
node_name = "_".join(_get_output_names(node))
else:
node_name = _get_output_name(node)
if node.kind() != "prim::GetAttr":
ops.append((node_name, node))
return ops
def _get_relay_input_vars(graph, input_infos, prelude, is_module=True, default_dtype="float32"):
"""
Return Relay vars from input shapes and create entries based on
expected graph inputs - to allow translation
"""
graph_inputs = list(graph.inputs())
if is_module:
# a module has "self" as first input, which we do not need/want
graph_inputs = graph_inputs[1:]
if not isinstance(input_infos, list):
msg = "Graph inputs input_infos should be a list"
raise RuntimeError(msg)
if len(graph_inputs) != len(input_infos):
msg = "PyTorch has {} inputs and input_infos lists {}.".format(
len(graph_inputs), len(input_infos)
)
raise RuntimeError(msg)
def get_relay_ty(ishape, itype, pt_type):
if pt_type.kind() == "TensorType":
if not (_is_int_seq(ishape) or len(ishape) == 0):
msg = "Shape for Tensors must be lists of ints"
raise RuntimeError(msg)
if (pt_type.dim() is not None and pt_type.dim() != len(ishape)) or (
pt_type.sizes() is not None
and any([s1 != s2 for s1, s2 in zip(pt_type.sizes(), ishape)])
):
msg = "Shapes of input list and information in the graph do not match"
raise RuntimeError(msg)
pt_dtype = pt_type.scalarType()
if not pt_dtype and itype:
pt_dtype = itype
dtype = _convert_data_type(pt_dtype, default_dtype=default_dtype)
return TensorType(ishape, dtype)
elif pt_type.kind() == "TupleType":
if not isinstance(ishape, tuple):
msg = "Shapes for tuples must be tuples"
raise RuntimeError(msg)
return TupleType(
[get_relay_ty(elem, itype, pt_t) for elem, pt_t in zip(ishape, pt_type.elements())]
)
elif pt_type.kind() == "ListType":
if not isinstance(ishape, list):
msg = "Shapes for lists must be lists"
raise RuntimeError(msg)
pt_elemtype = pt_type.getElementType()
elem_tys = [get_relay_ty(s, itype, pt_elemtype) for s in ishape]
if len(elem_tys) > 0 and not all(map(lambda ty: ty == elem_tys[0], elem_tys)):
msg = "List elements need have identical types"
raise RuntimeError(msg)
rlist, _, _ = prelude.mod.get_type("List")
return rlist(elem_tys[0])
elif pt_type.kind() == "OptionalType":
# we do not support None yet, so we fill in the type
return get_relay_ty(ishape, itype, pt_type.getElementType())
# TODO: scalar inputs
raise NotImplementedError("unsupported input type")
input_vars = {}
new_input_infos = []
for num, inp in enumerate(input_infos):
if not isinstance(inp, tuple):
msg = "Graph input {} is not a tuple".format(num)
raise RuntimeError(msg)
if len(inp) != 2 or not isinstance(inp[0], str):
msg = (
"Graph input {} is not valid,"
" expected ('name', shape) or ('name', (shape, dtype))".format(inp)
)
raise RuntimeError(msg)
if not isinstance(inp[1], tuple) or len(inp[1]) == 0 or not isinstance(inp[1][-1], str):
new_input_infos.append((inp[0], (inp[1], default_dtype)))
else:
new_input_infos.append(inp)
input_types = [
(name, get_relay_ty(info[0], info[1], gi.type()))
for (name, info), gi in zip(new_input_infos, graph_inputs)
]
ir_inputs = [i.debugName() for i in graph_inputs]
for ir_input, (name, itype) in zip(ir_inputs, input_types):
inp = _expr.var(name, type_annotation=itype)
# Translate from graph input to user input name
input_vars[ir_input] = inp
return input_vars
def _unpack_tuple(tup):
def unpack(tup, num_fields):
return [_expr.TupleGetItem(tup, i) for i in range(num_fields)]
if isinstance(tup, _expr.Tuple):
return unpack(tup, len(tup.fields))
elif isinstance(tup.type_annotation, TupleType):
return unpack(tup, len(tup.type_annotation.fields))
# shouldn't happen
assert False
def _get_free_vars_from_block(block):
block_inp_names = _get_input_names(block)
bound_names = block_inp_names
free_vars = set()
for node in block.nodes():
inp_names = _get_input_names(node)
list_diff = [name for name in inp_names if name not in bound_names]
free_vars.update(list_diff)
bound_names += _get_output_names(node)
return free_vars
def get_use_chains(root_node, terminate=lambda _: False):
"""
Track a chain of users of this node forward, returning a list of chains
See get_attr_chains below for its usage
"""
def concat_lists(lists):
return itertools.chain.from_iterable(lists)
def inner(current, accum):
users = _get_users(current)
if not users or terminate(users):
return [accum]
return concat_lists([inner(nxt, accum + [nxt]) for nxt in users])
return inner(root_node, [root_node])
def get_attr_chains(root_getattr_node):
"""Returns chains of attribute access starting from root_getattr_node
For example, given attribute "block", as in "self.block" when "self" points
to the top level torch.nn.Module, it returns lists of attribute "chains",
e.g. ['block', '2'], ['block', '1'], ['block', '0', '_packed_params']
These sets of attributes form full attribute accessors. For example,
"self.block.1", "self.block.2" will return the second and third submodule,
and "self.block.0._packed_params" will return the parameters of the first
submodule.
"""
def terminate(users):
next_attrs = [user for user in users if user.kind() == "prim::GetAttr"]
return len(next_attrs) == 0
return get_use_chains(root_getattr_node, terminate)
def convert_params(graph, state_dict, use_parser_friendly_name=False):
"""
Return Relay vars and TVM NDArrays for input parameters
A chain of prim::GetAttr nodes is processed one at a time
"""
getattr_nodes = graph.findAllNodes("prim::GetAttr", recurse=True)
params = {}
param_tensors = {}
packed_param_map = {}
vars_by_name = {}
seen = set()
attr_name_sep = "_" if use_parser_friendly_name else "."
for node in getattr_nodes:
if _get_output_name(node) in seen:
continue
for getattrs in get_attr_chains(node):
seen.update(map(_get_output_name, getattrs))
full_attr = _getattr_full_name(getattrs, attr_name_sep)
full_attr_node_name = _get_output_name(getattrs[-1])
if full_attr.endswith("_packed_params"): # for quantized models
packed_param_map[full_attr_node_name] = full_attr
elif full_attr in state_dict:
if full_attr in vars_by_name:
var = vars_by_name[full_attr]
else:
torch_tensor = state_dict[full_attr]
tensor, var = _get_tensor_and_var(torch_tensor, full_attr)
param_tensors[full_attr] = tensor
vars_by_name[full_attr] = var
params[full_attr_node_name] = var
return params, param_tensors, packed_param_map
def get_all_op_names(graph):
"""Return all operator names in the input graph"""
nodes = list(graph.nodes())
prim_with_blocks = ["prim::If", "prim::Loop"]
for prim in prim_with_blocks:
prim_nodes = graph.findAllNodes(prim, recurse=True)
for prim_node in prim_nodes:
for block in prim_node.blocks():
nodes += block.nodes()
return set(node.kind() for node in nodes)
def from_pytorch(
script_module,
input_infos,
custom_convert_map=None,
default_dtype="float32",
use_parser_friendly_name=False,
keep_quantized_weight=False,
):
"""Load PyTorch model in the form of a scripted PyTorch model and convert into relay.
The companion parameters will be handled automatically.
Parameters
----------
script_module : TopLevelTracedModule object
TorchScripted PyTorch graph
        Note: We currently only support traces (i.e., torch.jit.trace(model, input))
input_infos : List of tuples
Can be (input name, input shape) or (input name, (input shape, input types))
Graph level input shape and type list
The same input names need to be used for deployment, so choose easy to
remember names (such as: input0, input1)
e.g.
[('input0', (1, 2)), ('input1', (3, 4))]
or
[('input0', ((1, 2), 'int')), ('input1', ((3, 4), 'float'))]
custom_convert_map : Dictionary of str to Relay op
A custom op conversion map in the same format as _convert_map above
    default_dtype : str
The default dtype to use when type information is not provided by PyTorch.
use_parser_friendly_name : bool
        When True, replace '.' with '_' in the original parameter names.
The Relay text parser treats a variable name followed by a period as a tuple element access,
so a variable name like "dense.weight" cannot be parsed correctly.
Use this option when you want to run the AnnotateSpans pass on the imported module.
keep_quantized_weight : bool
Return quantized weights and bias, rather than float ones. PyTorch stores quantized weights
in a custom format, so we cannot directly access 8 bit weights as Numpy arrays. We use
a PyTorch function to unpack quantized weights into float32 arrays and quantization
parameters. By default, we return float32 weights and rely on the QNN lowering and the
Relay constant folding pass to quantize weights at compile time. In BYOC use cases, however,
we cannot apply the constant folding pass on a QNN graph. If keep_quantized_weight is True,
we quantize weights in the frontend using a function that is equivalent to
qnn.op.quantize(...) operating on Numpy arrays.
Returns
-------
mod : tvm.IRModule
The module that optimizations will be performed on.
params : dict of str to tvm.runtime.NDArray
Dict of converted parameters stored in tvm.runtime.ndarray format
"""
import torch
mod = tvm.IRModule()
prelude = Prelude(mod)
enable_lower_all_tuples = True
converter = PyTorchOpConverter(prelude, default_dtype)
graph = script_module.graph.copy()
# Check if lower_all_tuples pass can be enabled
graph_inputs = list(graph.inputs())
for inp in graph_inputs:
if inp.type().kind() == "TupleType" or inp.type().kind() == "ListType":
enable_lower_all_tuples = False
break
_run_jit_passes(graph, enable_lower_all_tuples)
if custom_convert_map:
converter.update_convert_map(custom_convert_map)
op_names = get_all_op_names(graph)
converter.report_missing_conversion(op_names)
is_module = isinstance(script_module, torch.jit.ScriptModule)
params = script_module.state_dict() if is_module else {}
outputs = _get_relay_input_vars(
graph, input_infos, prelude, default_dtype=default_dtype, is_module=is_module
)
if use_parser_friendly_name:
new_names = [key.replace(".", "_") for key in params.keys()]
params = dict(zip(new_names, params.values()))
param_vars, tensors, packed_param_map = convert_params(graph, params, use_parser_friendly_name)
tvm_params = {k: tvm.nd.array(v) for k, v in tensors.items()}
outputs.update(param_vars)
ret_name = _get_input_names(graph.return_node())
# For quantized models
quantized_ops = set(["aten::quantize_per_tensor", "quantized::linear_dynamic"])
if len(quantized_ops.intersection(set(op_names))) > 0:
weight_quant_params = qnn_torch.get_weight_quant_params(
script_module, packed_param_map.values()
)
qnn_torch.inline_input_quant_params_for_fx(graph, tensors)
input_scales_for_bias = qnn_torch.add_input_quant_params_to_op_inputs(graph)
qnn_torch.add_quant_params_to_outputs(
outputs,
packed_param_map,
weight_quant_params,
input_scales_for_bias,
keep_quantized_weight,
)
qnn_torch.add_quant_params(tvm_params, weight_quant_params)
converter.update_convert_map(qnn_torch.convert_map)
outputs = converter.convert_operators(_get_operator_nodes(graph.nodes()), outputs, ret_name)
# ListConstruct kept original python list. Convert to tuple.
outputs = [_expr.Tuple(output) if isinstance(output, list) else output for output in outputs]
if len(outputs) > 1:
ret = _expr.Tuple(outputs)
else:
ret = outputs[0]
# Separate data inputs and parameters to make sure data inputs come first.
func_args = []
data_inputs = []
for arg in _analysis.free_vars(ret):
if arg.name_hint not in tvm_params.keys():
data_inputs.append(arg)
else:
func_args.append(arg)
func_args = data_inputs + func_args
mod["main"] = tvm.relay.Function(func_args, ret)
return transform.RemoveUnusedFunctions()(mod), tvm_params
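# --- Editor's addition: a minimal, hedged usage sketch for from_pytorch(); not part of the
# original module. It assumes torch and tvm are installed; the tiny model and the input name
# "input0" are made up purely for illustration.
def _example_from_pytorch():
    import torch
    class TinyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 2)
        def forward(self, x):
            return torch.relu(self.linear(x))
    example_input = torch.randn(1, 4)
    scripted = torch.jit.trace(TinyModel().eval(), example_input)
    # input_infos follows the ('name', shape) form documented in from_pytorch() above.
    mod, params = from_pytorch(scripted, [("input0", (1, 4))])
    return mod, params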
| 37.520233
| 120
| 0.578523
|
fc20c7ff23103be71b3e60c22a0f246c31449383
| 16,994
|
py
|
Python
|
art/utils.py
|
b0bbybaldi/adversarial-robustness-toolbox
|
68a170642d96194c4586d6f9e8c801ccf7b1c1f6
|
[
"MIT"
] | 1
|
2019-05-24T12:11:49.000Z
|
2019-05-24T12:11:49.000Z
|
art/utils.py
|
b0bbybaldi/adversarial-robustness-toolbox
|
68a170642d96194c4586d6f9e8c801ccf7b1c1f6
|
[
"MIT"
] | null | null | null |
art/utils.py
|
b0bbybaldi/adversarial-robustness-toolbox
|
68a170642d96194c4586d6f9e8c801ccf7b1c1f6
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module providing convenience functions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import numpy as np
from scipy.special import gammainc
logger = logging.getLogger(__name__)
def projection(v, eps, p):
"""
Project the values in `v` on the L_p norm ball of size `eps`.
:param v: Array of perturbations to clip.
:type v: `np.ndarray`
:param eps: Maximum norm allowed.
:type eps: `float`
:param p: L_p norm to use for clipping. Only 1, 2 and `np.Inf` supported for now.
:type p: `int`
:return: Values of `v` after projection.
:rtype: `np.ndarray`
"""
# Pick a small scalar to avoid division by 0
tol = 10e-8
v_ = v.reshape((v.shape[0], -1))
if p == 2:
v_ = v_ * np.expand_dims(np.minimum(1., eps / (np.linalg.norm(v_, axis=1) + tol)), axis=1)
elif p == 1:
v_ = v_ * np.expand_dims(np.minimum(1., eps / (np.linalg.norm(v_, axis=1, ord=1) + tol)), axis=1)
elif p == np.inf:
v_ = np.sign(v_) * np.minimum(abs(v_), eps)
else:
raise NotImplementedError('Values of `p` different from 1, 2 and `np.inf` are currently not supported.')
v = v_.reshape(v.shape)
return v
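# Editor's addition (not in the original module): a small sketch showing how projection()
# clips a batch of random perturbations onto an L2 ball of radius 0.5.
def _demo_projection():
    rng = np.random.RandomState(0)
    perturbations = rng.randn(4, 3, 8, 8)
    clipped = projection(perturbations, eps=0.5, p=2)
    # Every flattened perturbation now has an L2 norm of at most 0.5 (up to tolerance).
    norms = np.linalg.norm(clipped.reshape(clipped.shape[0], -1), axis=1)
    assert np.all(norms <= 0.5 + 1e-6)
    return clipped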
def random_sphere(nb_points, nb_dims, radius, norm):
"""
    Generate `nb_points` random points in `nb_dims` dimensions, lying within radius `radius` of the origin.
:param nb_points: Number of random data points
:type nb_points: `int`
:param nb_dims: Dimensionality
:type nb_dims: `int`
:param radius: Radius
:type radius: `float`
:param norm: Current support: 1, 2, np.inf
:type norm: `int`
:return: The generated random sphere
:rtype: `np.ndarray`
"""
if norm == 1:
a = np.zeros(shape=(nb_points, nb_dims + 1))
a[:, -1] = np.sqrt(np.random.uniform(0, radius ** 2, nb_points))
for i in range(nb_points):
a[i, 1:-1] = np.sort(np.random.uniform(0, a[i, -1], nb_dims - 1))
res = (a[:, 1:] - a[:, :-1]) * np.random.choice([-1, 1], (nb_points, nb_dims))
elif norm == 2:
a = np.random.randn(nb_points, nb_dims)
s2 = np.sum(a ** 2, axis=1)
base = gammainc(nb_dims / 2.0, s2 / 2.0) ** (1 / nb_dims) * radius / np.sqrt(s2)
res = a * (np.tile(base, (nb_dims, 1))).T
elif norm == np.inf:
res = np.random.uniform(float(-radius), float(radius), (nb_points, nb_dims))
else:
raise NotImplementedError("Norm {} not supported".format(norm))
return res
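# Editor's addition (not in the original module): a quick sanity sketch for random_sphere(),
# checking that the sampled points stay inside the requested L2 ball.
def _demo_random_sphere():
    points = random_sphere(nb_points=100, nb_dims=10, radius=1.0, norm=2)
    assert points.shape == (100, 10)
    assert np.all(np.linalg.norm(points, axis=1) <= 1.0 + 1e-6)
    return points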
def to_categorical(labels, nb_classes=None):
"""
Convert an array of labels to binary class matrix.
:param labels: An array of integer labels of shape `(nb_samples,)`
:type labels: `np.ndarray`
:param nb_classes: The number of classes (possible labels)
:type nb_classes: `int`
:return: A binary matrix representation of `y` in the shape `(nb_samples, nb_classes)`
:rtype: `np.ndarray`
"""
labels = np.array(labels, dtype=np.int32)
if not nb_classes:
nb_classes = np.max(labels) + 1
categorical = np.zeros((labels.shape[0], nb_classes), dtype=np.float32)
categorical[np.arange(labels.shape[0]), np.squeeze(labels)] = 1
return categorical
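# Editor's addition (not in the original module): to_categorical() turns integer labels into
# one-hot rows, e.g. labels [0, 2, 1] with 3 classes become a 3x3 binary matrix.
def _demo_to_categorical():
    one_hot = to_categorical(np.array([0, 2, 1]), nb_classes=3)
    assert one_hot.shape == (3, 3)
    assert np.array_equal(one_hot.argmax(axis=1), np.array([0, 2, 1]))
    return one_hot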
def random_targets(labels, nb_classes):
"""
Given a set of correct labels, randomly choose target labels different from the original ones. These can be
one-hot encoded or integers.
:param labels: The correct labels
:type labels: `np.ndarray`
:param nb_classes: The number of classes for this model
:type nb_classes: `int`
:return: An array holding the randomly-selected target classes, one-hot encoded.
:rtype: `np.ndarray`
"""
if len(labels.shape) > 1:
labels = np.argmax(labels, axis=1)
result = np.zeros(labels.shape)
for class_ind in range(nb_classes):
other_classes = list(range(nb_classes))
other_classes.remove(class_ind)
in_cl = labels == class_ind
result[in_cl] = np.random.choice(other_classes)
return to_categorical(result, nb_classes)
def least_likely_class(x, classifier):
"""
Compute the least likely class predictions for sample `x`. This strategy for choosing attack targets was used in
(Kurakin et al., 2016). See https://arxiv.org/abs/1607.02533.
:param x: A data sample of shape accepted by `classifier`.
:type x: `np.ndarray`
:param classifier: The classifier used for computing predictions.
:type classifier: `Classifier`
:return: Least-likely class predicted by `classifier` for sample `x` in one-hot encoding.
:rtype: `np.ndarray`
"""
return to_categorical(np.argmin(classifier.predict(x), axis=1), nb_classes=classifier.nb_classes)
def get_label_conf(y_vec):
"""
Returns the confidence and the label of the most probable class given a vector of class confidences
:param y_vec: (np.ndarray) vector of class confidences, nb of instances as first dimension
:return: (np.ndarray, np.ndarray) confidences and labels
"""
assert len(y_vec.shape) == 2
confs, labels = np.amax(y_vec, axis=1), np.argmax(y_vec, axis=1)
return confs, labels
def get_labels_np_array(preds):
    """Returns the label of the most probable class given an array of class confidences.
See get_labels_tf_tensor() for tensorflow version
:param preds: (np.ndarray) array of class confidences, nb of instances as first dimension
:return: (np.ndarray) labels
"""
preds_max = np.amax(preds, axis=1, keepdims=True)
y = (preds == preds_max).astype(float)
return y
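# Editor's addition (not in the original module): get_labels_np_array() keeps a 1.0 only at
# the argmax position of each row of class confidences.
def _demo_get_labels_np_array():
    preds = np.array([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])
    labels = get_labels_np_array(preds)
    assert np.array_equal(labels, np.array([[0., 1., 0.], [1., 0., 0.]]))
    return labels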
def preprocess(x, y, nb_classes=10, max_value=255):
"""Scales `x` to [0, 1] and converts `y` to class categorical confidences.
:param x: Data instances
:type x: `np.ndarray`
:param y: Labels
:type y: `np.ndarray`
:param nb_classes: Number of classes in dataset
:type nb_classes: `int`
:param max_value: Original maximum allowed value for features
:type max_value: `int`
:return: rescaled values of `x`, `y`
:rtype: `tuple`
"""
x = x.astype('float32') / max_value
y = to_categorical(y, nb_classes)
return x, y
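# Editor's addition (not in the original module): preprocess() rescales raw uint8 features to
# [0, 1] and one-hot encodes the labels.
def _demo_preprocess():
    x = np.array([[0, 127, 255]], dtype=np.uint8)
    y = np.array([3])
    x_scaled, y_one_hot = preprocess(x, y, nb_classes=10, max_value=255)
    assert x_scaled.max() <= 1.0 and y_one_hot.shape == (1, 10)
    return x_scaled, y_one_hot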
# -------------------------------------------------------------------------------------------------------- IO FUNCTIONS
def load_cifar10(raw=False):
    """Loads the CIFAR10 dataset from `DATA_PATH` or downloads it if necessary.
:param raw: `True` if no preprocessing should be applied to the data. Otherwise, data is normalized to 1.
:type raw: `bool`
:return: `(x_train, y_train), (x_test, y_test), min, max`
:rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
"""
import keras.backend as k
from keras.datasets.cifar import load_batch
from keras.utils.data_utils import get_file
from art import DATA_PATH
path = get_file('cifar-10-batches-py', untar=True, cache_subdir=DATA_PATH,
origin='http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz')
num_train_samples = 50000
x_train = np.zeros((num_train_samples, 3, 32, 32), dtype=np.uint8)
y_train = np.zeros((num_train_samples,), dtype=np.uint8)
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
data, labels = load_batch(fpath)
x_train[(i - 1) * 10000: i * 10000, :, :, :] = data
y_train[(i - 1) * 10000: i * 10000] = labels
fpath = os.path.join(path, 'test_batch')
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if k.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
min_, max_ = 0, 255
if not raw:
min_, max_ = 0., 1.
x_train, y_train = preprocess(x_train, y_train)
x_test, y_test = preprocess(x_test, y_test)
return (x_train, y_train), (x_test, y_test), min_, max_
def load_mnist(raw=False):
"""Loads MNIST dataset from `DATA_PATH` or downloads it if necessary.
:param raw: `True` if no preprocessing should be applied to the data. Otherwise, data is normalized to 1.
:type raw: `bool`
:return: `(x_train, y_train), (x_test, y_test), min, max`
:rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
"""
from keras.utils.data_utils import get_file
from art import DATA_PATH
path = get_file('mnist.npz', cache_subdir=DATA_PATH, origin='https://s3.amazonaws.com/img-datasets/mnist.npz')
f = np.load(path)
x_train = f['x_train']
y_train = f['y_train']
x_test = f['x_test']
y_test = f['y_test']
f.close()
# Add channel axis
min_, max_ = 0, 255
if not raw:
min_, max_ = 0., 1.
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
x_train, y_train = preprocess(x_train, y_train)
x_test, y_test = preprocess(x_test, y_test)
return (x_train, y_train), (x_test, y_test), min_, max_
def load_stl():
"""Loads the STL-10 dataset from config.STL10_PATH or downloads it if necessary.
:return: `(x_train, y_train), (x_test, y_test), min, max`
:rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
"""
from os.path import join
import keras.backend as k
from keras.utils.data_utils import get_file
from art import DATA_PATH
min_, max_ = 0., 1.
# Download and extract data if needed
path = get_file('stl10_binary', cache_subdir=DATA_PATH, untar=True,
origin='https://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz')
with open(join(path, str('train_X.bin')), str('rb')) as f:
x_train = np.fromfile(f, dtype=np.uint8)
x_train = np.reshape(x_train, (-1, 3, 96, 96))
with open(join(path, str('test_X.bin')), str('rb')) as f:
x_test = np.fromfile(f, dtype=np.uint8)
x_test = np.reshape(x_test, (-1, 3, 96, 96))
if k.image_data_format() == 'channels_last':
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
with open(join(path, str('train_y.bin')), str('rb')) as f:
y_train = np.fromfile(f, dtype=np.uint8)
y_train -= 1
with open(join(path, str('test_y.bin')), str('rb')) as f:
y_test = np.fromfile(f, dtype=np.uint8)
y_test -= 1
x_train, y_train = preprocess(x_train, y_train)
x_test, y_test = preprocess(x_test, y_test)
return (x_train, y_train), (x_test, y_test), min_, max_
def load_dataset(name):
"""
Loads or downloads the dataset corresponding to `name`. Options are: `mnist`, `cifar10` and `stl10`.
:param name: Name of the dataset
:type name: `str`
:return: The dataset separated in training and test sets as `(x_train, y_train), (x_test, y_test), min, max`
:rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
:raises NotImplementedError: If the dataset is unknown.
"""
if "mnist" in name:
return load_mnist()
elif "cifar10" in name:
return load_cifar10()
elif "stl10" in name:
return load_stl()
else:
raise NotImplementedError("There is no loader for dataset '{}'.".format(name))
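# Editor's addition (not in the original module): typical call pattern for load_dataset().
# Note that loading MNIST requires keras and possibly network access, so this is only a sketch.
def _demo_load_dataset():
    (x_train, y_train), (x_test, y_test), min_, max_ = load_dataset('mnist')
    assert min_ == 0. and max_ == 1.
    return x_train.shape, x_test.shape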
def make_directory(dir_path):
"""
Creates the specified tree of directories if needed.
:param dir_path: (str) directory or file path
:return: None
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def get_npy_files(path):
"""
Generator returning all the npy files in path subdirectories.
:param path: (str) directory path
:return: (str) paths
"""
for root, _, files in os.walk(path):
for file_ in files:
if file_.endswith(".npy"):
yield os.path.join(root, file_)
# ------------------------------------------------------------------- ARG PARSER
def get_args(prog, load_classifier=False, load_sample=False, per_batch=False, options=""):
"""
Parser for all scripts
:param prog: name of the script calling the function
:param load_classifier: bool, load a model, default False
:param load_sample: bool, load (adversarial) data for training, default False
:param per_batch: bool, load data in batches, default False
    :param options: string of single-character keys selecting which of the optional arguments below to add
:return: parsed arguments
"""
parser = argparse.ArgumentParser(prog=prog, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
option_dict = {
"a": {"flags": ["-a", "--adv"],
"kwargs": {"type": str, "dest": 'adv_method', "default": "fgsm",
"choices": ["fgsm", "deepfool", "universal", "jsma", "vat", "carlini", "rnd_fgsm"],
"help": 'choice of attacker'}},
"b": {"flags": ["-b", "--batchsize"],
"kwargs": {"type": int, "dest": 'batch_size', "default": 128, "help": 'size of the batches'}},
"c": {"flags": ["-c", "--classifier"],
"kwargs": {"type": str, "dest": 'classifier', "default": "cnn", "choices": ["cnn", "resnet", "mlp"],
"help": 'choice of classifier'}},
"d": {"flags": ["-d", "--dataset"],
"kwargs": {"type": str, "dest": 'dataset', "default": "mnist",
"help": 'either the path or name of the dataset the classifier is tested/trained on.'}},
"e": {"flags": ["-e", "--epochs"],
"kwargs": {"type": int, "dest": 'nb_epochs', "default": 20,
"help": 'number of epochs for training the classifier'}},
"f": {"flags": ["-f", "--act"],
"kwargs": {"type": str, "dest": 'act', "default": "relu", "choices": ["relu", "brelu"],
"help": 'choice of activation function'}},
"n": {"flags": ["-n", "--nbinstances"],
"kwargs": {"type": int, "dest": 'nb_instances', "default": 1,
"help": 'number of supplementary instances per true example'}},
"r": {"flags": ["-r", "--valsplit"],
"kwargs": {"type": float, "dest": 'val_split', "default": 0.1,
"help": 'ratio of training sample used for validation'}},
"s": {"flags": ["-s", "--save"],
"kwargs": {"nargs": '?', "type": str, "dest": 'save', "default": False,
"help": 'if set, the classifier is saved; if an argument is provided it is used as path to'
' store the model'}},
"t": {"flags": ["-t", "--stdev"],
"kwargs": {"type": float, "dest": 'std_dev', "default": 0.1,
"help": 'standard deviation of the distributions'}},
"v": {"flags": ["-v", "--verbose"],
"kwargs": {"dest": 'verbose', "action": "store_true", "help": 'if set, verbose mode'}},
"z": {"flags": ["-z", "--defences"],
"kwargs": {"dest": 'defences', "nargs": "*", "default": None, "help": 'list of basic defences.'}},
}
# Add required arguments
if load_classifier:
parser.add_argument("load", type=str, help='the classifier is loaded from `load` directory.')
if load_sample:
parser.add_argument("adv_path", type=str, help='path to the dataset for data augmentation training.')
if per_batch:
parser.add_argument("batch_idx", type=int, help='index of the batch to use.')
# Add optional arguments
for o in options:
parser.add_argument(*option_dict[o]["flags"], **option_dict[o]["kwargs"])
return parser.parse_args()
def get_verbose_print(verbose):
"""
Sets verbose mode.
:param verbose: (bool) True for verbose, False for quiet
:return: (function) printing function
"""
if verbose:
return print
else:
return lambda *a, **k: None
| 37.514349
| 120
| 0.619866
|
6d9332fd23d153a16b08083b8b5821e556a4f859
| 6,248
|
py
|
Python
|
trio/_core/_mock_clock.py
|
RmStorm/trio
|
708c2e3029f210832ee8c9354887879e0161b065
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
trio/_core/_mock_clock.py
|
RmStorm/trio
|
708c2e3029f210832ee8c9354887879e0161b065
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
trio/_core/_mock_clock.py
|
RmStorm/trio
|
708c2e3029f210832ee8c9354887879e0161b065
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import time
from math import inf
from .. import _core
from ._run import GLOBAL_RUN_CONTEXT
from .._abc import Clock
from .._util import SubclassingDeprecatedIn_v0_15_0
################################################################
# The glorious MockClock
################################################################
# Prior art:
# https://twistedmatrix.com/documents/current/api/twisted.internet.task.Clock.html
# https://github.com/ztellman/manifold/issues/57
class MockClock(Clock, metaclass=SubclassingDeprecatedIn_v0_15_0):
"""A user-controllable clock suitable for writing tests.
Args:
rate (float): the initial :attr:`rate`.
autojump_threshold (float): the initial :attr:`autojump_threshold`.
.. attribute:: rate
How many seconds of clock time pass per second of real time. Default is
       0.0, i.e. the clock only advances through manual calls to :meth:`jump`
or when the :attr:`autojump_threshold` is triggered. You can assign to
this attribute to change it.
.. attribute:: autojump_threshold
The clock keeps an eye on the run loop, and if at any point it detects
that all tasks have been blocked for this many real seconds (i.e.,
according to the actual clock, not this clock), then the clock
automatically jumps ahead to the run loop's next scheduled
timeout. Default is :data:`math.inf`, i.e., to never autojump. You can
assign to this attribute to change it.
Basically the idea is that if you have code or tests that use sleeps
and timeouts, you can use this to make it run much faster, totally
automatically. (At least, as long as those sleeps/timeouts are
       happening inside Trio; if your test involves talking to an external
       service and waiting for it to time out, then obviously we can't help you
there.)
You should set this to the smallest value that lets you reliably avoid
"false alarms" where some I/O is in flight (e.g. between two halves of
a socketpair) but the threshold gets triggered and time gets advanced
anyway. This will depend on the details of your tests and test
environment. If you aren't doing any I/O (like in our sleeping example
above) then just set it to zero, and the clock will jump whenever all
tasks are blocked.
.. note:: If you use ``autojump_threshold`` and
`wait_all_tasks_blocked` at the same time, then you might wonder how
they interact, since they both cause things to happen after the run
loop goes idle for some time. The answer is:
`wait_all_tasks_blocked` takes priority. If there's a task blocked
in `wait_all_tasks_blocked`, then the autojump feature treats that
       as an active task and does *not* jump the clock.
"""
def __init__(self, rate=0.0, autojump_threshold=inf):
# when the real clock said 'real_base', the virtual time was
# 'virtual_base', and since then it's advanced at 'rate' virtual
# seconds per real second.
self._real_base = 0.0
self._virtual_base = 0.0
self._rate = 0.0
self._autojump_threshold = 0.0
# kept as an attribute so that our tests can monkeypatch it
self._real_clock = time.perf_counter
# use the property update logic to set initial values
self.rate = rate
self.autojump_threshold = autojump_threshold
def __repr__(self):
return "<MockClock, time={:.7f}, rate={} @ {:#x}>".format(
self.current_time(), self._rate, id(self)
)
@property
def rate(self):
return self._rate
@rate.setter
def rate(self, new_rate):
if new_rate < 0:
raise ValueError("rate must be >= 0")
else:
real = self._real_clock()
virtual = self._real_to_virtual(real)
self._virtual_base = virtual
self._real_base = real
self._rate = float(new_rate)
@property
def autojump_threshold(self):
return self._autojump_threshold
@autojump_threshold.setter
def autojump_threshold(self, new_autojump_threshold):
self._autojump_threshold = float(new_autojump_threshold)
self._try_resync_autojump_threshold()
# runner.clock_autojump_threshold is an internal API that isn't easily
# usable by custom third-party Clock objects. If you need access to this
# functionality, let us know, and we'll figure out how to make a public
# API. Discussion:
#
# https://github.com/python-trio/trio/issues/1587
def _try_resync_autojump_threshold(self):
try:
runner = GLOBAL_RUN_CONTEXT.runner
if runner.is_guest:
runner.force_guest_tick_asap()
except AttributeError:
pass
else:
runner.clock_autojump_threshold = self._autojump_threshold
# Invoked by the run loop when runner.clock_autojump_threshold is
# exceeded.
def _autojump(self):
statistics = _core.current_statistics()
jump = statistics.seconds_to_next_deadline
if 0 < jump < inf:
self.jump(jump)
def _real_to_virtual(self, real):
real_offset = real - self._real_base
virtual_offset = self._rate * real_offset
return self._virtual_base + virtual_offset
def start_clock(self):
self._try_resync_autojump_threshold()
def current_time(self):
return self._real_to_virtual(self._real_clock())
def deadline_to_sleep_time(self, deadline):
virtual_timeout = deadline - self.current_time()
if virtual_timeout <= 0:
return 0
elif self._rate > 0:
return virtual_timeout / self._rate
else:
return 999999999
def jump(self, seconds):
"""Manually advance the clock by the given number of seconds.
Args:
seconds (float): the number of seconds to jump the clock forward.
Raises:
ValueError: if you try to pass a negative value for ``seconds``.
"""
if seconds < 0:
raise ValueError("time can't go backwards")
self._virtual_base += seconds
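# --- Editor's addition: a hedged usage sketch, not part of the original module. It assumes
# trio is installed and relies on trio.run() accepting a ``clock`` argument; with
# autojump_threshold=0 the sleep below completes almost instantly in real time while virtual
# time still advances by the full 10 seconds.
def _demo_mock_clock():
    import trio
    async def main():
        start = trio.current_time()
        await trio.sleep(10)
        return trio.current_time() - start
    elapsed = trio.run(main, clock=MockClock(autojump_threshold=0))
    assert elapsed >= 10
    return elapsed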
| 37.638554
| 84
| 0.649808
|
70b33e0510862179f13f8965a43f0354ea7bc9cb
| 3,036
|
py
|
Python
|
codescroll/_project_information_builder.py
|
Suresoft-GLaDOS/cxbuild
|
1eb568bc11ae8854b1a6025c969ec94c96d6a4a9
|
[
"MIT"
] | 2
|
2021-11-01T02:11:59.000Z
|
2021-11-04T09:19:45.000Z
|
codescroll/_project_information_builder.py
|
HansolChoe/cxbuild
|
c289e40efdf92f34e7781772b3b84e0a1c7d0af2
|
[
"MIT"
] | 3
|
2021-11-04T06:23:38.000Z
|
2021-11-19T01:54:05.000Z
|
codescroll/_project_information_builder.py
|
HansolChoe/cxbuild
|
c289e40efdf92f34e7781772b3b84e0a1c7d0af2
|
[
"MIT"
] | 2
|
2021-11-01T03:01:28.000Z
|
2021-11-04T09:19:28.000Z
|
import json
import cslib
import compiler
from codescroll.runner import *
def _normalize_command_list(commands):
    # All commands arrive here already converted to absolute paths
    # Normalize entries of absolute_command_list that contain quotes, then inspect them
    # STCS-165: a macro argument may also contain quote characters, so check that the
    # first and last characters are the same quote symbol before stripping
normalize_commands = []
for cmd in commands:
if len(cmd) > 1 and (cmd[0] == '"' or cmd[0] == "'") and cmd[0] == cmd[-1]:
cmd = cmd.replace(cmd[0], '')
normalize_commands.append(cmd)
return normalize_commands
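# Editor's addition (not part of the original module): a small sketch of what
# _normalize_command_list() does with fully quoted command tokens.
def _demo_normalize_command_list():
    commands = ['gcc', '"-DNAME=value"', "'-I/usr/include'", '-c', 'main.c']
    normalized = _normalize_command_list(commands)
    # Tokens wrapped entirely in a matching pair of quotes have those quotes removed.
    assert normalized[1] == '-DNAME=value'
    assert normalized[2] == '-I/usr/include'
    return normalized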
class _ProjectInformationBuilder(Runner):
def start(self, options, cstrace_json_filepath=None):
if not os.path.exists(cstrace_json_filepath):
return False, None
project_json_path = os.path.join(libcsbuild.get_working_dir(), "project.json")
with open(cstrace_json_filepath, "r") as tracefile:
traces = json.load(tracefile)
if os.path.exists(project_json_path):
os.remove(project_json_path)
project_json = self._create_project_json(traces)
with open(project_json_path, 'w', encoding='utf-8') as project_json_file:
json.dump(project_json, project_json_file, indent=4)
libcsbuild.step_message("project.json written [%s]" % project_json_path)
return True, project_json_path
def get_open_files(self, pid, traces):
collects = []
collects.extend(traces[pid]['open'])
for child_pid in traces[pid]['sigchld']:
collects.extend(self.get_open_files(child_pid, traces))
return collects
def _create_project_json(self, traces):
sources = self._source_list(traces)
module = {
"dependencies": [],
"linkFlags": [],
"linkType": "not yet",
"name": "not yet",
"sources": sources
}
project = {'modules': [module]}
return project
def _source_list(self, traces):
ret = []
for pid in traces:
if traces[pid]['execve'] is None:
continue
invoked_command = traces[pid]['execve'][0]
build_location = traces[pid]['execve'][2]
commands = _normalize_command_list(traces[pid]['execve'][1])
if not cslib.is_interest_call(invoked_command):
continue
compiler_tool = compiler.create(invoked_command, commands)
open_files = list(set(self.get_open_files(pid, traces)))
open_files.sort()
options, compiling_source_files = compiler_tool.split_command()
for sourcefile in compiling_source_files:
source = {
'dependencies': open_files,
'buildLocation': build_location,
'originalPath': sourcefile,
'originalFlags': "not-yet",
'command': options,
'compiler': invoked_command
}
ret.append(source)
return ret
| 34.11236
| 86
| 0.589262
|
bc527ca716ff9f8bed67ff5a0e3707c54d5bbc06
| 1,006
|
py
|
Python
|
earthquakeformat/example2.py
|
pinaky/utilities
|
c76123defa2b233992399a5647588684c54264d4
|
[
"MIT"
] | null | null | null |
earthquakeformat/example2.py
|
pinaky/utilities
|
c76123defa2b233992399a5647588684c54264d4
|
[
"MIT"
] | null | null | null |
earthquakeformat/example2.py
|
pinaky/utilities
|
c76123defa2b233992399a5647588684c54264d4
|
[
"MIT"
] | null | null | null |
from earthquakeformat import CSMIPVolume2Record
import os.path
s = CSMIPVolume2Record('test2.V2')
# --- This block of commands export acceleration data from all channels to CSV format
basename = 'CE1430V2_ACC_'
channelnumber = 0
# From each channel, get (#data points, time step) and check that they're all the same.
vals=[(e['info']['int_numv2accdpts'][1], e['info']['dbl_avgdt'][1]) for e in s.channels]
assert len(set(vals)) == 1, 'All channels do not have the same number of acceleration points. Quitting...'
nptsa = vals[0][0]
dt = vals[0][1] * 1.0E-3 # In milliseconds, so needs to be multiplied by 1E-3
file = open(basename+'{0}'.format(channelnumber)+'.csv', 'w')
header = 'Time (s),'+''.join(['Channel {0:02d} Acceleration (cm/s2),'.format(e) for e in range(len(s.channels))])+'\n'
file.write(header)
for i in range(nptsa):
line = '{0},'.format(i * dt)+''.join(['{0},'.format(e['adata'][i]) for e in s.channels])+'\n'
file.write(line)
file.close()
# ---
| 41.916667
| 119
| 0.65507
|
7c358cffc20e4e15f117068a8a7d4885e3913b15
| 27,325
|
py
|
Python
|
aiokafka/client.py
|
patkivikram/aiokafka-1
|
bbbfc4bf4ed02c0a25b4a86c8ec7afa5b5867975
|
[
"Apache-2.0"
] | 1
|
2021-10-01T08:02:17.000Z
|
2021-10-01T08:02:17.000Z
|
aiokafka/client.py
|
patkivikram/aiokafka-1
|
bbbfc4bf4ed02c0a25b4a86c8ec7afa5b5867975
|
[
"Apache-2.0"
] | null | null | null |
aiokafka/client.py
|
patkivikram/aiokafka-1
|
bbbfc4bf4ed02c0a25b4a86c8ec7afa5b5867975
|
[
"Apache-2.0"
] | 2
|
2020-12-04T22:05:41.000Z
|
2022-03-16T22:26:49.000Z
|
import asyncio
import logging
import random
import time
from kafka.conn import collect_hosts
from kafka.protocol.metadata import MetadataRequest
from kafka.protocol.commit import OffsetFetchRequest
from kafka.protocol.fetch import FetchRequest
import aiokafka.errors as Errors
from aiokafka import __version__
from aiokafka.conn import create_conn, CloseReason
from aiokafka.cluster import ClusterMetadata
from aiokafka.protocol.coordination import FindCoordinatorRequest
from aiokafka.protocol.produce import ProduceRequest
from aiokafka.errors import (
KafkaError,
KafkaConnectionError,
NodeNotReadyError,
RequestTimedOutError,
UnknownTopicOrPartitionError,
UnrecognizedBrokerVersion,
StaleMetadata)
from aiokafka.util import (
create_task, create_future, parse_kafka_version, get_running_loop
)
from aiokafka.protocol.group import SyncGroupRequest
__all__ = ['AIOKafkaClient']
log = logging.getLogger('aiokafka')
class ConnectionGroup:
DEFAULT = 0
COORDINATION = 1
class CoordinationType:
GROUP = 0
TRANSACTION = 1
class AIOKafkaClient:
"""Initialize an asynchronous kafka client
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'aiokafka-{ver}'
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 40000.
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata even if we haven't seen
any partition leadership changes to proactively discover any
new brokers or partitions. Default: 300000
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
api_version (str): specify which kafka API version to use.
AIOKafka supports Kafka API versions >=0.9 only.
If set to 'auto', will attempt to infer the broker version by
probing various APIs. Default: auto
security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT.
ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
socket connections. For more information see :ref:`ssl_auth`.
Default: None.
connections_max_idle_ms (int): Close idle connections after the number
of milliseconds specified by this config. Specifying `None` will
disable idle checks. Default: 540000 (9 minutes).
"""
def __init__(self, *, loop=None, bootstrap_servers='localhost',
client_id='aiokafka-' + __version__,
metadata_max_age_ms=300000,
request_timeout_ms=40000,
retry_backoff_ms=100,
ssl_context=None,
security_protocol='PLAINTEXT',
api_version='auto',
connections_max_idle_ms=540000,
sasl_mechanism='PLAIN',
sasl_plain_username=None,
sasl_plain_password=None,
sasl_kerberos_service_name='kafka',
sasl_kerberos_domain_name=None,
sasl_oauth_token_provider=None
):
if loop is None:
loop = get_running_loop()
if security_protocol not in (
'SSL', 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL'):
            raise ValueError(
                "`security_protocol` should be PLAINTEXT, SSL, "
                "SASL_PLAINTEXT or SASL_SSL")
if security_protocol in ["SSL", "SASL_SSL"] and ssl_context is None:
            raise ValueError(
                "`ssl_context` is mandatory when security_protocol is 'SSL' or 'SASL_SSL'")
if security_protocol in ["SASL_SSL", "SASL_PLAINTEXT"]:
if sasl_mechanism not in (
"PLAIN", "GSSAPI", "SCRAM-SHA-256", "SCRAM-SHA-512",
"OAUTHBEARER"):
                raise ValueError(
                    "only `PLAIN`, `GSSAPI`, `SCRAM-SHA-256`, "
                    "`SCRAM-SHA-512` and `OAUTHBEARER` "
                    "sasl_mechanism values are supported "
                    "at the moment")
if sasl_mechanism == "PLAIN" and \
(sasl_plain_username is None or sasl_plain_password is None):
raise ValueError(
"sasl_plain_username and sasl_plain_password required for "
"PLAIN sasl")
self._bootstrap_servers = bootstrap_servers
self._client_id = client_id
self._metadata_max_age_ms = metadata_max_age_ms
self._request_timeout_ms = request_timeout_ms
if api_version != "auto":
api_version = parse_kafka_version(api_version)
self._api_version = api_version
self._security_protocol = security_protocol
self._ssl_context = ssl_context
self._retry_backoff = retry_backoff_ms / 1000
self._connections_max_idle_ms = connections_max_idle_ms
self._sasl_mechanism = sasl_mechanism
self._sasl_plain_username = sasl_plain_username
self._sasl_plain_password = sasl_plain_password
self._sasl_kerberos_service_name = sasl_kerberos_service_name
self._sasl_kerberos_domain_name = sasl_kerberos_domain_name
self._sasl_oauth_token_provider = sasl_oauth_token_provider
self.cluster = ClusterMetadata(metadata_max_age_ms=metadata_max_age_ms)
self._topics = set() # empty set will fetch all topic metadata
self._conns = {}
self._loop = loop
self._sync_task = None
self._md_update_fut = None
self._md_update_waiter = create_future()
self._get_conn_lock = asyncio.Lock()
def __repr__(self):
return '<AIOKafkaClient client_id=%s>' % self._client_id
@property
def api_version(self):
if type(self._api_version) is tuple:
return self._api_version
# unknown api version, return minimal supported version
return (0, 9, 0)
@property
def hosts(self):
return collect_hosts(self._bootstrap_servers)
async def close(self):
if self._sync_task:
self._sync_task.cancel()
try:
await self._sync_task
except asyncio.CancelledError:
pass
self._sync_task = None
# Be careful to wait for graceful closure of all connections, so we
# process all pending buffers.
futs = []
for conn in self._conns.values():
futs.append(conn.close(reason=CloseReason.SHUTDOWN))
if futs:
await asyncio.gather(*futs)
    async def bootstrap(self):
        """Try to bootstrap initial cluster metadata"""
assert self._loop is asyncio.get_event_loop(), (
"Please create objects with the same loop as running with"
)
# using request v0 for bootstrap if not sure v1 is available
if self._api_version == "auto" or self._api_version < (0, 10):
metadata_request = MetadataRequest[0]([])
else:
metadata_request = MetadataRequest[1]([])
version_hint = None
if self._api_version != "auto":
version_hint = self._api_version
for host, port, _ in self.hosts:
log.debug("Attempting to bootstrap via node at %s:%s", host, port)
try:
bootstrap_conn = await create_conn(
host, port, client_id=self._client_id,
request_timeout_ms=self._request_timeout_ms,
ssl_context=self._ssl_context,
security_protocol=self._security_protocol,
max_idle_ms=self._connections_max_idle_ms,
sasl_mechanism=self._sasl_mechanism,
sasl_plain_username=self._sasl_plain_username,
sasl_plain_password=self._sasl_plain_password,
sasl_kerberos_service_name=self._sasl_kerberos_service_name, # noqa: ignore=E501
sasl_kerberos_domain_name=self._sasl_kerberos_domain_name,
sasl_oauth_token_provider=self._sasl_oauth_token_provider,
version_hint=version_hint)
except (OSError, asyncio.TimeoutError) as err:
                log.error('Unable to connect to "%s:%s": %s', host, port, err)
continue
try:
metadata = await bootstrap_conn.send(metadata_request)
except (KafkaError, asyncio.TimeoutError) as err:
log.warning('Unable to request metadata from "%s:%s": %s',
host, port, err)
bootstrap_conn.close()
continue
self.cluster.update_metadata(metadata)
# A cluster with no topics can return no broker metadata...
# In that case, we should keep the bootstrap connection till
# we get a normal cluster layout.
if not len(self.cluster.brokers()):
bootstrap_id = ('bootstrap', ConnectionGroup.DEFAULT)
self._conns[bootstrap_id] = bootstrap_conn
else:
bootstrap_conn.close()
log.debug('Received cluster metadata: %s', self.cluster)
break
else:
raise KafkaConnectionError(
'Unable to bootstrap from {}'.format(self.hosts))
# detect api version if need
if self._api_version == 'auto':
self._api_version = await self.check_version()
if self._sync_task is None:
# starting metadata synchronizer task
self._sync_task = create_task(self._md_synchronizer())
    async def _md_synchronizer(self):
        """Routine (async task) for synchronizing cluster metadata every
`metadata_max_age_ms` milliseconds"""
while True:
await asyncio.wait(
[self._md_update_waiter],
timeout=self._metadata_max_age_ms / 1000)
topics = self._topics
if self._md_update_fut is None:
self._md_update_fut = create_future()
ret = await self._metadata_update(self.cluster, topics)
# If list of topics changed during metadata update we must update
# it again right away.
if topics != self._topics:
continue
# Earlier this waiter was set before sending metadata_request,
# but that was to avoid topic list changes being unnoticed, which
# is handled explicitly now.
self._md_update_waiter = create_future()
self._md_update_fut.set_result(ret)
self._md_update_fut = None
    def get_random_node(self):
        """Choose a random node from the known cluster brokers
Returns:
nodeId - identifier of broker
"""
nodeids = [b.nodeId for b in self.cluster.brokers()]
if not nodeids:
return None
return random.choice(nodeids)
async def _metadata_update(self, cluster_metadata, topics):
assert isinstance(cluster_metadata, ClusterMetadata)
topics = list(topics)
version_id = 0 if self.api_version < (0, 10) else 1
if version_id == 1 and not topics:
topics = None
metadata_request = MetadataRequest[version_id](topics)
nodeids = [b.nodeId for b in self.cluster.brokers()]
bootstrap_id = ('bootstrap', ConnectionGroup.DEFAULT)
if bootstrap_id in self._conns:
nodeids.append('bootstrap')
random.shuffle(nodeids)
for node_id in nodeids:
conn = await self._get_conn(node_id)
if conn is None:
continue
log.debug("Sending metadata request %s to node %s",
metadata_request, node_id)
try:
metadata = await conn.send(metadata_request)
except (KafkaError, asyncio.TimeoutError) as err:
log.error(
'Unable to request metadata from node with id %s: %r',
node_id, err)
continue
# don't update the cluster if there are no valid nodes...the topic
# we want may still be in the process of being created which means
# we will get errors and no nodes until it exists
if not metadata.brokers:
return False
cluster_metadata.update_metadata(metadata)
# We only keep bootstrap connection to update metadata until
# proper cluster layout is available.
if bootstrap_id in self._conns and len(self.cluster.brokers()):
conn = self._conns.pop(bootstrap_id)
conn.close()
break
else:
log.error('Unable to update metadata from %s', nodeids)
cluster_metadata.failed_update(None)
return False
return True
def force_metadata_update(self):
"""Update cluster metadata
Returns:
True/False - metadata updated or not
"""
if self._md_update_fut is None:
# Wake up the `_md_synchronizer` task
if not self._md_update_waiter.done():
self._md_update_waiter.set_result(None)
self._md_update_fut = create_future()
        # Metadata will be updated in the background by the synchronizer
return asyncio.shield(self._md_update_fut)
async def fetch_all_metadata(self):
cluster_md = ClusterMetadata(
metadata_max_age_ms=self._metadata_max_age_ms)
updated = await self._metadata_update(cluster_md, [])
if not updated:
raise KafkaError(
'Unable to get cluster metadata over all known brokers')
return cluster_md
def add_topic(self, topic):
"""Add a topic to the list of topics tracked via metadata.
Arguments:
topic (str): topic to track
"""
if topic in self._topics:
res = create_future()
res.set_result(True)
else:
res = self.force_metadata_update()
self._topics.add(topic)
return res
def set_topics(self, topics):
"""Set specific topics to track for metadata.
Arguments:
topics (list of str): topics to track
"""
assert not isinstance(topics, str)
if not topics or set(topics).difference(self._topics):
res = self.force_metadata_update()
else:
res = create_future()
res.set_result(True)
self._topics = set(topics)
return res
def _on_connection_closed(self, conn, reason):
""" Callback called when connection is closed
"""
# Connection failures imply that our metadata is stale, so let's
# refresh
if reason == CloseReason.CONNECTION_BROKEN or \
reason == CloseReason.CONNECTION_TIMEOUT:
self.force_metadata_update()
async def _get_conn(
self, node_id, *, group=ConnectionGroup.DEFAULT,
no_hint=False
):
"Get or create a connection to a broker using host and port"
conn_id = (node_id, group)
if conn_id in self._conns:
conn = self._conns[conn_id]
if not conn.connected():
del self._conns[conn_id]
else:
return conn
try:
if group == ConnectionGroup.DEFAULT:
broker = self.cluster.broker_metadata(node_id)
# XXX: earlier we only did an assert here, but it seems it's
# possible to get a leader that is for some reason not in
# metadata.
# I think requerying metadata should solve this problem
if broker is None:
raise StaleMetadata(
'Broker id %s not in current metadata' % node_id)
else:
broker = self.cluster.coordinator_metadata(node_id)
assert broker is not None
log.debug("Initiating connection to node %s at %s:%s",
node_id, broker.host, broker.port)
async with self._get_conn_lock:
if conn_id in self._conns:
return self._conns[conn_id]
version_hint = self._api_version
if version_hint == "auto" or no_hint:
version_hint = None
self._conns[conn_id] = await create_conn(
broker.host, broker.port,
client_id=self._client_id,
request_timeout_ms=self._request_timeout_ms,
ssl_context=self._ssl_context,
security_protocol=self._security_protocol,
on_close=self._on_connection_closed,
max_idle_ms=self._connections_max_idle_ms,
sasl_mechanism=self._sasl_mechanism,
sasl_plain_username=self._sasl_plain_username,
sasl_plain_password=self._sasl_plain_password,
sasl_kerberos_service_name=self._sasl_kerberos_service_name, # noqa: ignore=E501
sasl_kerberos_domain_name=self._sasl_kerberos_domain_name,
sasl_oauth_token_provider=self._sasl_oauth_token_provider,
version_hint=version_hint
)
except (OSError, asyncio.TimeoutError, KafkaError) as err:
            log.error('Unable to connect to node with id %s: %s', node_id, err)
if group == ConnectionGroup.DEFAULT:
# Connection failures imply that our metadata is stale, so
# let's refresh
self.force_metadata_update()
return None
else:
return self._conns[conn_id]
async def ready(self, node_id, *, group=ConnectionGroup.DEFAULT):
conn = await self._get_conn(node_id, group=group)
if conn is None:
return False
return True
async def send(self, node_id, request, *, group=ConnectionGroup.DEFAULT):
"""Send a request to a specific node.
Arguments:
node_id (int): destination node
request (Struct): request object (not-encoded)
Raises:
kafka.errors.RequestTimedOutError
kafka.errors.NodeNotReadyError
kafka.errors.KafkaConnectionError
kafka.errors.CorrelationIdError
Returns:
Future: resolves to Response struct
"""
if not (await self.ready(node_id, group=group)):
raise NodeNotReadyError(
"Attempt to send a request to node"
" which is not ready (node id {}).".format(node_id))
# Every request gets a response, except one special case:
expect_response = True
if isinstance(request, tuple(ProduceRequest)) and \
request.required_acks == 0:
expect_response = False
future = self._conns[(node_id, group)].send(
request, expect_response=expect_response)
try:
result = await future
except asyncio.TimeoutError:
# close connection so it is renewed in next request
self._conns[(node_id, group)].close(
reason=CloseReason.CONNECTION_TIMEOUT)
raise RequestTimedOutError()
else:
return result
async def check_version(self, node_id=None):
"""Attempt to guess the broker version"""
if node_id is None:
default_group_conns = [
n_id for (n_id, group) in self._conns.keys()
if group == ConnectionGroup.DEFAULT
]
if default_group_conns:
node_id = default_group_conns[0]
else:
assert self.cluster.brokers(), 'no brokers in metadata'
node_id = list(self.cluster.brokers())[0].nodeId
from kafka.protocol.admin import (
ListGroupsRequest_v0, ApiVersionRequest_v0)
from kafka.protocol.commit import (
OffsetFetchRequest_v0, GroupCoordinatorRequest_v0)
from kafka.protocol.metadata import MetadataRequest_v0
test_cases = [
((0, 10), ApiVersionRequest_v0()),
((0, 9), ListGroupsRequest_v0()),
((0, 8, 2), GroupCoordinatorRequest_v0('aiokafka-default-group')),
((0, 8, 1), OffsetFetchRequest_v0('aiokafka-default-group', [])),
((0, 8, 0), MetadataRequest_v0([])),
]
# kafka kills the connection when it does not recognize an API request
# so we can send a test request and then follow immediately with a
# vanilla MetadataRequest. If the server did not recognize the first
# request, both will be failed with a ConnectionError that wraps
# socket.error (32, 54, or 104)
conn = await self._get_conn(node_id, no_hint=True)
if conn is None:
raise KafkaConnectionError(
"No connection to node with id {}".format(node_id))
for version, request in test_cases:
try:
if not conn.connected():
await conn.connect()
assert conn, 'no connection to node with id {}'.format(node_id)
# request can be ignored by Kafka broker,
# so we send metadata request and wait response
task = create_task(conn.send(request))
await asyncio.wait([task], timeout=0.1)
try:
await conn.send(MetadataRequest_v0([]))
except KafkaError:
# metadata request can be cancelled in case
# of invalid correlationIds order
pass
response = await task
except KafkaError:
continue
else:
# To avoid having a connection in undefined state
if node_id != "bootstrap" and conn.connected():
conn.close()
if isinstance(request, ApiVersionRequest_v0):
# Starting from 0.10 kafka broker we determine version
# by looking at ApiVersionResponse
version = self._check_api_version_response(response)
return version
raise UnrecognizedBrokerVersion()
def _check_api_version_response(self, response):
# The logic here is to check the list of supported request versions
# in descending order. As soon as we find one that works, return it
test_cases = [
# format (<broker version>, <needed struct>)
((2, 3, 0), SyncGroupRequest[0].API_KEY, 3),
((2, 3, 0), FetchRequest[0].API_KEY, 11),
((2, 1, 0), MetadataRequest[0].API_KEY, 7),
((1, 1, 0), FetchRequest[0].API_KEY, 7),
((1, 0, 0), MetadataRequest[0].API_KEY, 5),
((0, 11, 0), MetadataRequest[0].API_KEY, 4),
((0, 10, 2), OffsetFetchRequest[0].API_KEY, 2),
((0, 10, 1), MetadataRequest[0].API_KEY, 2),
]
error_type = Errors.for_code(response.error_code)
assert error_type is Errors.NoError, "API version check failed"
max_versions = dict([
(api_key, max_version)
for api_key, _, max_version in response.api_versions
])
# Get the best match of test cases
for broker_version, api_key, version in test_cases:
if max_versions.get(api_key, -1) >= version:
return broker_version
# We know that ApiVersionResponse is only supported in 0.10+
# so if all else fails, choose that
return (0, 10, 0)
async def _wait_on_metadata(self, topic):
"""
Wait for cluster metadata including partitions for the given topic to
be available.
Arguments:
topic (str): topic we want metadata for
Returns:
set: partition ids for the topic
Raises:
UnknownTopicOrPartitionError: if no topic or partitions found
in cluster metadata
"""
if topic in self.cluster.topics():
return self.cluster.partitions_for_topic(topic)
# add topic to metadata topic list if it is not there already.
self.add_topic(topic)
t0 = time.monotonic()
while True:
await self.force_metadata_update()
if topic in self.cluster.topics():
break
if (time.monotonic() - t0) > (self._request_timeout_ms / 1000):
raise UnknownTopicOrPartitionError()
if topic in self.cluster.unauthorized_topics:
raise Errors.TopicAuthorizationFailedError(topic)
await asyncio.sleep(self._retry_backoff)
return self.cluster.partitions_for_topic(topic)
async def _maybe_wait_metadata(self):
if self._md_update_fut is not None:
await asyncio.shield(self._md_update_fut)
async def coordinator_lookup(self, coordinator_type, coordinator_key):
""" Lookup which node in the cluster is the coordinator for a certain
role (Transaction coordinator or Group coordinator atm.)
NOTE: Client keeps track of all coordination nodes separately, as they
all have different sockets and ids.
"""
node_id = self.get_random_node()
assert node_id is not None, "Did we not perform bootstrap?"
log.debug(
"Sending FindCoordinator request for key %s to broker %s",
coordinator_key, node_id)
if self.api_version > (0, 11):
request = FindCoordinatorRequest[1](
coordinator_key, coordinator_type)
else:
# Group coordination only
assert coordinator_type == CoordinationType.GROUP, \
"No transactions for older brokers"
request = FindCoordinatorRequest[0](coordinator_key)
resp = await self.send(node_id, request)
log.debug("Received group coordinator response %s", resp)
error_type = Errors.for_code(resp.error_code)
if error_type is not Errors.NoError:
err = error_type()
raise err
self.cluster.add_coordinator(
resp.coordinator_id, resp.host, resp.port, rack=None,
purpose=(coordinator_type, coordinator_key))
return resp.coordinator_id
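# --- Editor's addition: a hedged usage sketch, not part of the original module. It assumes a
# Kafka broker is reachable at localhost:9092 and only calls methods defined in this file
# (bootstrap, fetch_all_metadata, close).
def _demo_client():
    import asyncio
    async def main():
        client = AIOKafkaClient(bootstrap_servers='localhost:9092')
        await client.bootstrap()
        try:
            cluster = await client.fetch_all_metadata()
            return sorted(cluster.topics())
        finally:
            await client.close()
    return asyncio.run(main())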
| 40.541543
| 101
| 0.60505
|
da171f97851ae5d0f58aa3d0b1bb9b78258b0349
| 1,412
|
py
|
Python
|
tools/nntool/importer/onnx/handlers/backend/mul.py
|
00-01/gap_sdk
|
25444d752b26ccf0b848301c381692d77172852c
|
[
"Apache-2.0"
] | 118
|
2018-05-22T08:45:59.000Z
|
2022-03-30T07:00:45.000Z
|
tools/nntool/importer/onnx/handlers/backend/mul.py
|
00-01/gap_sdk
|
25444d752b26ccf0b848301c381692d77172852c
|
[
"Apache-2.0"
] | 213
|
2018-07-25T02:37:32.000Z
|
2022-03-30T18:04:01.000Z
|
tools/nntool/importer/onnx/handlers/backend/mul.py
|
00-01/gap_sdk
|
25444d752b26ccf0b848301c381692d77172852c
|
[
"Apache-2.0"
] | 76
|
2018-07-04T08:19:27.000Z
|
2022-03-24T09:58:05.000Z
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from graph.types.tensor_arithmetic import MatrixMulParameters
from ..backend_handler import BackendHandler
from ..handler import onnx_op
from .math_mixin import ArithmeticMixin
@onnx_op("Mul")
class Mul(ArithmeticMixin, BackendHandler):
@classmethod
def _common(cls, node, **kwargs):
return super(Mul, cls)._common(node, params_class=MatrixMulParameters,
constant_operation=lambda x, y: x * y,
**kwargs)
@classmethod
def version_7(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._common(node, **kwargs)
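# Illustrative note (a hedged sketch, not part of the nntool API): the
# ``constant_operation`` passed above is the plain element-wise product that
# would be applied when both operands are already known constants, e.g.:
#
#     import numpy as np
#     fold = lambda x, y: x * y
#     fold(np.array([1.0, 2.0]), np.array([3.0, 4.0]))   # array([3., 8.])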
| 36.205128
| 78
| 0.706091
|
d1474f79fc8f33e52087301cb746b04cc0e61c19
| 1,565
|
py
|
Python
|
companies/models.py
|
miclemabasie/jobizar
|
1dde7aab4ba02de75517a5cf91d54b43ac41e3cb
|
[
"MIT"
] | null | null | null |
companies/models.py
|
miclemabasie/jobizar
|
1dde7aab4ba02de75517a5cf91d54b43ac41e3cb
|
[
"MIT"
] | null | null | null |
companies/models.py
|
miclemabasie/jobizar
|
1dde7aab4ba02de75517a5cf91d54b43ac41e3cb
|
[
"MIT"
] | null | null | null |
from django.db import models
from users.models import Profile
class Company(models.Model):
name = models.CharField(max_length=255)
line_of_engagement = models.CharField(max_length=255, null=True, blank=True)
location = models.CharField(max_length=255)
logo = models.ImageField(upload_to='companies', default="none.png", null=True, blank=True)
about = models.TextField()
link = models.CharField(max_length=1000, null=True, blank=True)
mail = models.EmailField(null=True, blank=True)
profile = models.OneToOneField(Profile, on_delete=models.CASCADE)
facebook = models.CharField(max_length=255, null=True, blank=True, help_text='Your facebook page link, Optional')
google = models.CharField(max_length=255, null=True, blank=True, help_text='Your google plus page link, Optional')
twitter = models.CharField(max_length=255, null=True, blank=True, help_text='Your twitter page link, Optional')
linkedin = models.CharField(max_length=255, null=True, blank=True, help_text='Your linkedin page link, Optional')
instagram = models.CharField(max_length=255, null=True, blank=True, help_text='Your facebook page link, Optional')
slug = models.SlugField(unique=True, null=True, blank=True)
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
    def save(self, *args, **kwargs):
        # Normalize the name before persisting so the upper-cased value is saved.
        self.name = str(self.name).upper()
        super().save(*args, **kwargs)
class Meta:
        verbose_name_plural = 'companies'
def __str__(self):
return self.name
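# Illustrative usage sketch (hedged; assumes migrations are applied and an
# existing ``Profile`` instance named ``profile``):
#
#     company = Company.objects.create(
#         name="acme", location="Buea", about="Widgets", profile=profile)
#     company.name   # upper-cased by ``save()`` before the row is written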
| 46.029412
| 118
| 0.722045
|
98ab713602c578de9e6bc2cb074f71d4843f4c82
| 54,919
|
py
|
Python
|
python/paddle/fluid/layers/tensor.py
|
honlinchen/Paddle
|
99d30bfc367d6b472d8b667656678f6b6d84db0c
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/layers/tensor.py
|
honlinchen/Paddle
|
99d30bfc367d6b472d8b667656678f6b6d84db0c
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/layers/tensor.py
|
honlinchen/Paddle
|
99d30bfc367d6b472d8b667656678f6b6d84db0c
|
[
"Apache-2.0"
] | 1
|
2020-09-12T21:35:19.000Z
|
2020-09-12T21:35:19.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from six.moves import reduce
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varbase_creator
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
import numpy
import warnings
__all__ = [
'create_tensor', 'create_parameter', 'create_global_var', 'cast',
'tensor_array_to_tensor', 'concat', 'sums', 'assign',
'fill_constant_batch_size_like', 'fill_constant', 'argmin', 'argmax',
'argsort', 'ones', 'zeros', 'reverse', 'has_inf', 'has_nan', 'isfinite',
'range', 'linspace', 'zeros_like', 'ones_like', 'diag', 'eye'
]
def create_tensor(dtype, name=None, persistable=False):
"""
Create a variable, which will hold a Tensor with data type dtype.
Args:
dtype(string|numpy.dtype): the data type of Tensor to be created, the
data type is bool, float16, float32, float64, int8, int16, int32 and int64.
name(string, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
persistable(bool): Set the persistable flag of the create tensor.
default value is False.
Returns:
Variable: The tensor to be created according to dtype.
Examples:
.. code-block:: python
import paddle.fluid as fluid
tensor = fluid.layers.create_tensor(dtype='float32')
"""
helper = LayerHelper("create_tensor", **locals())
return helper.create_variable(
name=helper.name, dtype=dtype, persistable=persistable)
def create_parameter(shape,
dtype,
name=None,
attr=None,
is_bias=False,
default_initializer=None):
"""
This function creates a parameter. The parameter is a learnable variable, which can have
gradient, and can be optimized.
NOTE: this is a very low-level API. This API is useful when you create
operator by your self. instead of using layers.
Parameters:
shape (list of int): Shape of the parameter
dtype (str): Data type of the parameter
name (str, optional): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
attr (ParamAttr, optional): Attributes of the parameter
is_bias (bool, optional): This can affect which default initializer is chosen
when default_initializer is None. If is_bias,
initializer.Constant(0.0) will be used. Otherwise,
Xavier() will be used.
default_initializer (Initializer, optional): Initializer for the parameter
Returns:
The created parameter.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
W = layers.create_parameter(shape=[784, 200], dtype='float32')
"""
helper = LayerHelper("create_parameter", **locals())
if attr is None:
attr = ParamAttr(name=name)
return helper.create_parameter(attr, shape, dtype, is_bias,
default_initializer)
def create_global_var(shape,
value,
dtype,
persistable=False,
force_cpu=False,
name=None):
"""
This function creates a new tensor variable with value in the global block(block 0).
Parameters:
shape (list of int): Shape of the variable
value (float): The value of the variable. The new created
variable will be filled with it.
dtype (str): Data type of the variable
persistable (bool, optional): If this variable is persistable.
Default: False
force_cpu (bool, optional): Force this variable to be on CPU.
Default: False
name (str, optional): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
Returns:
Variable: The created Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32',
persistable=True, force_cpu=True, name='new_var')
"""
helper = LayerHelper("global_var", **locals())
var = helper.create_global_variable(
dtype=dtype,
shape=shape,
persistable=persistable,
name=name,
stop_gradient=True)
helper.set_variable_initializer(
var, initializer=Constant(
value=float(value), force_cpu=force_cpu))
return var
def cast(x, dtype):
"""
This OP takes in the Variable :attr:`x` with :attr:`x.dtype` and casts it
to the output with :attr:`dtype`. It's meaningless if the output dtype
equals the input dtype, but it's fine if you do so.
Args:
x(Variable): An input N-D Tensor with data type bool, float16,
float32, float64, int32, int64, uint8.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.
Returns:
Variable: A Tensor with the same shape as input's.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
place = fluid.core.CPUPlace()
x_lod = fluid.data(name="x", shape=[2,2], lod_level=0)
cast_res1 = fluid.layers.cast(x=x_lod, dtype="uint8")
cast_res2 = fluid.layers.cast(x=x_lod, dtype=np.int32)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
x_i_lod = fluid.core.LoDTensor()
x_i_lod.set(np.array([[1.3,-2.4],[0,4]]).astype("float32"), place)
x_i_lod.set_recursive_sequence_lengths([[0,2]])
res1 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res1], return_numpy=False)
res2 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res2], return_numpy=False)
print(np.array(res1[0]), np.array(res1[0]).dtype)
# [[ 1 254]
# [ 0 4]] uint8
print(np.array(res2[0]), np.array(res2[0]).dtype)
# [[ 1 -2]
# [ 0 4]] int32
"""
helper = LayerHelper('cast', **locals())
check_variable_and_dtype(
x, 'x',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
'cast')
out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(
type='cast',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'in_dtype': x.dtype,
'out_dtype': out.dtype})
return out
def concat(input, axis=0, name=None):
"""
**Concat**
This OP concatenates the input along the axis.
Args:
input(list): List of input Tensors with data type float32, float64, int32,
int64.
        axis(int32|Variable, optional): A scalar with type ``int32`` or a ``Tensor`` with shape [1] and type ``int32``. Axis to concatenate the input Tensors along. The effective range
is [-R, R), where R is Rank(x). when axis<0, it works the same way
as axis+R. Default is 0.
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: A Tensor with the same data type as input's.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[1,2,3],
[4,5,6]])
in2 = np.array([[11,12,13],
[14,15,16]])
in3 = np.array([[21,22],
[23,24]])
with fluid.dygraph.guard():
x1 = fluid.dygraph.to_variable(in1)
x2 = fluid.dygraph.to_variable(in2)
x3 = fluid.dygraph.to_variable(in3)
out1 = fluid.layers.concat(input=[x1,x2,x3], axis=-1)
out2 = fluid.layers.concat(input=[x1,x2], axis=0)
print(out1.numpy())
# [[ 1 2 3 11 12 13 21 22]
# [ 4 5 6 14 15 16 23 24]]
print(out2.numpy())
# [[ 1 2 3]
# [ 4 5 6]
# [11 12 13]
# [14 15 16]]
"""
if in_dygraph_mode():
if isinstance(axis, Variable):
axis = axis.numpy()
assert axis.shape == (
1, ), "axis of type Variable should have shape [1]"
axis = axis[0]
return core.ops.concat(input, 'axis', axis)
if not isinstance(input, list):
warnings.warn(
"The type of input in concat should be list, but received %s." %
(type(input)))
input = [input]
for id, x in enumerate(input):
check_variable_and_dtype(
x, 'input[' + str(id) + ']',
['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
check_type(axis, 'axis', (int, Variable), 'concat')
helper = LayerHelper('concat', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
            "number of the elements must be 1, but received %s." % len(input)
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': input[0]},
outputs={'Out': [out],
'OutIndex': [out_index]},
attrs={'axis': axis,
'use_stack': False})
else:
inputs = {'X': input}
attrs = {}
if isinstance(axis, Variable):
axis.stop_gradient = True
inputs['AxisTensor'] = axis
else:
attrs['axis'] = axis
helper.append_op(
type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
return out
def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
"""
This function concatenates or stacks all tensors in the input LoDTensorArray
along the axis mentioned and returns that as the output.
For Example:
.. code-block:: text
Case 1:
Given:
input.data = {[[0.6, 0.1, 0.3],
[0.5, 0.3, 0.2]],
[[1.3],
[1.8]],
[[2.3, 2.1],
[2.5, 2.4]]}
axis = 1, use_stack = False
Then:
output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
[0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]
output_index.data = [3, 1, 2]
Case 2:
Given:
input.data = {[[0.6, 0.1],
[0.5, 0.3]],
[[0.3, 1.3],
[0.2, 1.8]],
[[2.3, 2.1],
[2.5, 2.4]]}
axis = 1, use_stack = True
Then:
output.data = [[[0.6, 0.1]
[0.3, 1.3]
[2.3, 2.1],
[[0.5, 0.3]
[0.2, 1.8]
[2.5, 2.4]]]
output_index.data = [2, 2, 2]
Args:
input(Variable): A LodTensorArray variable.
axis(int): The axis along which the tensors in attr::`input` will be
concatenated or stacked.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
use_stack(bool): Act as concat_op or stack_op. For stack mode, all
tensors in the tensor array must have the same shape.
Returns:
Variable: The concatenated or stacked tensor variable.
Variable: A 1-D tensor variable with int32 data type. The data in this \
tensor contains all input including tensors' sizes along the axis.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
array = fluid.layers.create_array(dtype='float32')
fluid.layers.array_write(x0, i, array)
fluid.layers.array_write(x1, i + 1, array)
output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
"""
helper = LayerHelper('tensor_array_to_tensor', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': input},
outputs={'Out': [out],
'OutIndex': [out_index]},
attrs={'axis': axis,
'use_stack': use_stack})
return out, out_index
def sums(input, out=None):
"""
This function computes the sum of multiple input Tensors elementwisely.
- Case 1, sum of 3 Tensors
.. code-block:: text
# Input Tensors
x0.shape = [2, 3]
x0.data = [[1., 2., 3.],
[4., 5., 6.]]
x1.shape = [2, 3]
x1.data = [[10., 20., 30.],
[40., 50., 60.]]
x2.shape = [2, 3]
x2.data = [[100., 200., 300.],
[400., 500., 600.]]
# Output Tensor
out.shape = [2, 3]
out.data = [[111., 222., 333.],
[444., 555., 666.]]
Args:
input (list): A list of Variables which hold input Tensors with the same
data type and shape. Optional data types are: float32, float64, int32, int64.
out (Variable, optional): Output Tensor. It can be any existing Variable.
The default value is None, then a new Variable will be created and returned.
Returns:
Variable: The sum of inputs. The shape and data type is the same with input. \
If :code:`out` is not None, the returned value is :code:`out` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)
# Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
sum0 = fluid.layers.sums(input=[x0, x1, x2])
# Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
"""
helper = LayerHelper('sum', **locals())
if out is None:
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(
type='sum',
inputs={'X': input},
outputs={'Out': out},
attrs={'use_mkldnn': False})
return out
def assign(input, output=None):
"""
The OP copies the :attr:`input` to the :attr:`output`.
Parameters:
input (Variable|numpy.ndarray): A tensor or numpy ndarray, its data type supports
float32, float64, int32 and int64.
output (Variable, optional): A tensor. If :attr:`output` is None, a new tensor will
be created as :attr:`output`. Default: None.
Returns:
Variable: A tensor with the same shape, data type and value as :attr:`input`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.layers.fill_constant(shape=[3, 2], value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
result1 = fluid.layers.create_tensor(dtype='float64')
fluid.layers.assign(data, result1) # result1 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
result2 = fluid.layers.assign(data) # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
result3 = fluid.layers.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
"""
helper = LayerHelper('assign', **locals())
check_type(input, 'input', (Variable, numpy.ndarray), 'assign')
if isinstance(input, Variable):
check_dtype(input.dtype, 'input',
['float32', 'float64', 'int32', 'int64', 'bool'], 'assign',
'(When the type of input in assign is Variable.)')
if output is None:
output = helper.create_variable_for_type_inference(
dtype=input.dtype)
helper.append_op(
type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
elif isinstance(input, numpy.ndarray):
dtype = convert_np_dtype_to_dtype_(input.dtype)
if dtype == VarDesc.VarType.FP32:
value_name = "fp32_values"
values = [float(v) for v in input.flat]
elif dtype == VarDesc.VarType.INT32:
value_name = "int32_values"
values = [int(v) for v in input.flat]
elif dtype == VarDesc.VarType.INT64:
value_name = "int64_values"
values = [int(v) for v in input.flat]
else:
raise TypeError(
"When the type of 'input' in assign is numpy.ndarray, "
"the data type of 'input' must be float32, int32 or int64, but "
"received %s." % convert_dtype(dtype))
if input.size > 1024 * 1024:
raise ValueError("The size of input is too big. Please consider "
"saving it to file and 'load_op' to load it")
if output is None:
output = helper.create_variable_for_type_inference(
dtype=input.dtype)
helper.append_op(
type='assign_value',
outputs={'Out': [output]},
attrs={
'dtype': dtype,
'shape': list(input.shape),
value_name: values
})
return output
def fill_constant(shape, dtype, value, force_cpu=False, out=None):
"""
This OP creates a Tensor with specified `shape` and `dtype`, and
initializes it with a constant specified by `value`.
The attribute `stop_gradient` of the created Tensor is set to True.
Args:
shape(list|tuple|Variable): Shape of the Tensor to be created.
The data type is ``int32`` or ``int64`` . If ``shape`` is a list or tuple,
the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Variable, it should be a 1-D Tensor.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which can
be float16, float32, float64, int32, int64.
value(float): The constant value used to initialize the Tensor to be created.
        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
            if out is None, a new Variable will be created to store the result.
Returns:
Variable: Tensor which is created according to shape and dtype.
Raise:
TypeError: The dtype must be one of bool, float16, float32, float64, int32 and int64
and the data type of out Tensor must be the same as the dtype.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# attr shape is a list which doesn't contain Variable Tensor.
data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
# data1=[[0], [0]] data2=[[5], [5]]
# attr shape is a list which contains Variable Tensor.
positive_2 = fluid.layers.fill_constant([1], "int32", 2)
data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[1.5, 1.5]
# attr shape is an Variable Tensor.
shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
"""
attrs = {'value': float(value), 'force_cpu': force_cpu}
if convert_dtype(dtype) in ['int64', 'int32']:
attrs['str_value'] = str(int(value))
else:
attrs['str_value'] = str(float(value))
if in_dygraph_mode():
if isinstance(shape, (list, tuple)):
shape = list(
map(lambda x: x.numpy()[0] if isinstance(x, Variable) else x,
shape))
else:
shape = list(shape.numpy().astype(int))
dtype = convert_np_dtype_to_dtype_(dtype)
if out is None:
out = _varbase_creator(dtype=dtype)
core.ops.fill_constant(out, 'value',
float(value), 'force_cpu', force_cpu, 'dtype',
dtype, 'str_value', attrs['str_value'], 'shape',
shape)
out.stop_gradient = True
return out
helper = LayerHelper("fill_constant", **locals())
check_dtype(dtype, 'create data type',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'fill_constant')
check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
inputs = {}
attrs = {'value': float(value), 'force_cpu': force_cpu}
if convert_dtype(dtype) in ['int64', 'int32']:
attrs['str_value'] = str(int(value))
else:
attrs['str_value'] = str(float(value))
def _get_attr_shape(list_shape):
attr_shape = []
for idx, dim in enumerate(list_shape):
if isinstance(dim, Variable):
attr_shape.append(-1)
else:
attr_shape.append(dim)
return attr_shape
def _get_shape_tensor(list_shape):
new_shape_tensor = []
for idx, dim in enumerate(list_shape):
if isinstance(dim, Variable):
dim.stop_gradient = True
check_dtype(
dim.dtype, 'shape[' + str(idx) + ']', ['int32', 'int64'],
'fill_constant',
'(When type of shape in fill_constant is list or tuple.)')
if convert_dtype(dim.dtype) == 'int64':
dim = cast(x=dim, dtype='int32')
new_shape_tensor.append(dim)
else:
temp_out = helper.create_variable_for_type_inference('int32')
fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
new_shape_tensor.append(temp_out)
return new_shape_tensor
if isinstance(shape, Variable):
shape.stop_gradient = True
check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'fill_constant',
'(When type of shape in fill_constant is Variable.)')
if (convert_dtype(shape.dtype) == 'int64'):
shape = cast(shape, 'int32')
inputs["ShapeTensor"] = shape
elif isinstance(shape, (list, tuple)):
assert len(shape) > 0, (
"The size of 'shape' in fill_constant can't be zero, "
"but received %s." % len(shape))
attrs["shape"] = _get_attr_shape(shape)
if utils._contain_var(shape):
inputs['ShapeTensorList'] = _get_shape_tensor(shape)
if out is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
else:
check_dtype(
dtype, 'create data type',
convert_dtype(out.dtype), 'fill_constant',
'(The create data type in fill_constant must be the same with out data type.)'
)
attrs['dtype'] = out.dtype
helper.append_op(
type='fill_constant',
inputs=inputs,
outputs={'Out': [out]},
attrs=attrs,
stop_gradient=True)
out.stop_gradient = True
return out
@templatedoc()
def fill_constant_batch_size_like(input,
shape,
dtype,
value,
input_dim_idx=0,
output_dim_idx=0,
force_cpu=False):
"""
    This OP creates a Tensor according to the shape and dtype, and initializes the
Tensor with the constants provided in ``value``. When the input is LoDTensor
and the input_dim_idx is 0, the output_dim_idx dimension is set to the value
of the batch_size input by the input, the Stop_gradient attribute of the created
Tensor is False by default.
Args:
input(Variable): Tensor which data type is float32, float64, int32 and int64.
shape(list): The shape of Tensor to be created, Tensor's shape may be changed
according the input.
dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
can be float32, float64, int32, int64.
value(float|int): The constant value used to initialize the Tensor to be created.
input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
dimension of the created Tensor is set to the batch_size value of input.
The default value is 0.
output_dim_idx(int): Used to specify which dimension of Tensor is created to be set
the value of batch_size of input Tensor. The default value is 0.
force_cpu(bool): data should be on CPU if it's true, default value is False.
Returns:
Variable: Tensor which will be created according to dtype.
Examples:
.. code-block:: python
import paddle.fluid as fluid
like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
data = fluid.layers.fill_constant_batch_size_like(
input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]
"""
helper = LayerHelper("fill_constant_batch_size_like", **locals())
out = helper.create_variable_for_type_inference(dtype=dtype)
attrs = {
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx,
'force_cpu': force_cpu
}
if convert_dtype(dtype) in ['int64', 'int32']:
attrs['str_value'] = str(int(value))
else:
attrs['str_value'] = str(float(value))
helper.append_op(
type='fill_constant_batch_size_like',
inputs={'Input': input},
outputs={'Out': [out]},
attrs=attrs)
out.stop_gradient = True
return out
def argmin(x, axis=0):
"""
**argmin**
This OP computes the indices of the min elements of the input tensor's
element along the provided axis.
Args:
x(Variable): An input N-D Tensor with type float32, float64, int16,
int32, int64, uint8.
axis(int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is Rank(x). when axis<0, it works the same way
as axis+R. Default is 0.
Returns:
Variable: A Tensor with data type int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[[5,8,9,5],
[0,0,1,7],
[6,9,2,4]],
[[5,2,4,2],
[4,7,7,9],
[1,7,0,6]]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.argmin(x=x, axis=-1)
out2 = fluid.layers.argmin(x=x, axis=0)
out3 = fluid.layers.argmin(x=x, axis=1)
out4 = fluid.layers.argmin(x=x, axis=2)
print(out1.numpy())
# [[0 0 2]
# [1 0 2]]
print(out2.numpy())
# [[0 1 1 1]
# [0 0 0 0]
# [1 1 1 0]]
print(out3.numpy())
# [[1 1 1 2]
# [2 0 2 0]]
print(out4.numpy())
# [[0 0 2]
# [1 0 2]]
"""
helper = LayerHelper("arg_min", **locals())
out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
helper.append_op(
type='arg_min',
inputs={'X': x},
outputs={'Out': [out]},
attrs={'axis': axis})
out.stop_gradient = True
return out
def argmax(x, axis=0):
"""
**argmax**
This OP computes the indices of the max elements of the input tensor's
element along the provided axis.
Args:
x(Variable): An input N-D Tensor with type float32, float64, int16,
int32, int64, uint8.
axis(int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is Rank(x). when axis<0, it works the same way
as axis+R. Default is 0.
Returns:
Variable: A Tensor with data type int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[[5,8,9,5],
[0,0,1,7],
[6,9,2,4]],
[[5,2,4,2],
[4,7,7,9],
[1,7,0,6]]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.argmax(x=x, axis=-1)
out2 = fluid.layers.argmax(x=x, axis=0)
out3 = fluid.layers.argmax(x=x, axis=1)
out4 = fluid.layers.argmax(x=x, axis=2)
print(out1.numpy())
# [[2 3 1]
# [0 3 1]]
print(out2.numpy())
# [[0 0 0 0]
# [1 1 1 1]
# [0 0 0 1]]
print(out3.numpy())
# [[2 2 0 1]
# [0 1 1 1]]
print(out4.numpy())
# [[2 3 1]
# [0 3 1]]
"""
helper = LayerHelper("arg_max", **locals())
out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
helper.append_op(
type='arg_max',
inputs={'X': x},
outputs={'Out': [out]},
attrs={'axis': axis})
out.stop_gradient = True
return out
def argsort(input, axis=-1, descending=False, name=None):
"""
This OP sorts the input along the given axis, and returns sorted output
    data Variable and its corresponding index Variable with the same shape as
:attr:`input`.
Args:
input(Variable): An input N-D Tensor with type float32, float64, int16,
int32, int64, uint8.
axis(int, optional): Axis to compute indices along. The effective range
is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is -1.
descending(bool, optional) : Descending is a flag, if set to true,
algorithm will sort by descending order, else sort by
ascending order. Default is false.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
tuple: A tuple of sorted data Variable(with the same shape and data
type as input) and the sorted indices(with the same shape as input's
and with data type int64).
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
in1 = np.array([[[5,8,9,5],
[0,0,1,7],
[6,9,2,4]],
[[5,2,4,2],
[4,7,7,9],
[1,7,0,6]]]).astype(np.float32)
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(in1)
out1 = fluid.layers.argsort(input=x, axis=-1)
out2 = fluid.layers.argsort(input=x, axis=0)
out3 = fluid.layers.argsort(input=x, axis=1)
print(out1[0].numpy())
# [[[5. 5. 8. 9.]
# [0. 0. 1. 7.]
# [2. 4. 6. 9.]]
# [[2. 2. 4. 5.]
# [4. 7. 7. 9.]
# [0. 1. 6. 7.]]]
print(out1[1].numpy())
# [[[0 3 1 2]
# [0 1 2 3]
# [2 3 0 1]]
# [[1 3 2 0]
# [0 1 2 3]
# [2 0 3 1]]]
print(out2[0].numpy())
# [[[5. 2. 4. 2.]
# [0. 0. 1. 7.]
# [1. 7. 0. 4.]]
# [[5. 8. 9. 5.]
# [4. 7. 7. 9.]
# [6. 9. 2. 6.]]]
print(out3[0].numpy())
# [[[0. 0. 1. 4.]
# [5. 8. 2. 5.]
# [6. 9. 9. 7.]]
# [[1. 2. 0. 2.]
# [4. 7. 4. 6.]
# [5. 7. 7. 9.]]]
"""
helper = LayerHelper("argsort", **locals())
out = helper.create_variable_for_type_inference(
dtype=input.dtype, stop_gradient=True)
ids = helper.create_variable_for_type_inference(
VarDesc.VarType.INT64, stop_gradient=True)
helper.append_op(
type='argsort',
inputs={'X': input},
outputs={'Out': out,
'Indices': ids},
attrs={'axis': axis,
'descending': descending})
return out, ids
def ones(shape, dtype, force_cpu=False):
"""
The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
Its :attr:`stop_gradient` will be set to True to stop gradient computation.
Parameters:
shape (tuple|list): Shape of output tensor.
dtype (np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
bool, float16, float32, float64, int32 and int64.
force_cpu (bool, optional): Whether force to store the output tensor in CPU memory.
If :attr:`force_cpu` is False, the output tensor will be stored in running device memory.
Default: False.
Returns:
Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]
"""
assert isinstance(shape, list) or isinstance(
shape, tuple), "The shape's type should be list or tuple."
assert reduce(lambda x, y: x * y,
shape) > 0, "The shape is invalid: %s." % (str(shape))
return fill_constant(value=1.0, **locals())
def zeros(shape, dtype, force_cpu=False):
"""
The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
Its :attr:`stop_gradient` will be set to True to stop gradient computation.
Parameters:
shape (tuple|list): Shape of output tensor.
dtype (np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
bool, float16, float32, float64, int32 and int64.
force_cpu (bool, optional): Whether force to store the output tensor in CPU memory.
If :attr:`force_cpu` is False, the output tensor will be stored in running device memory.
Default: False.
Returns:
Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
"""
check_dtype(dtype, 'create data type',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'zeros')
return fill_constant(value=0.0, **locals())
def reverse(x, axis):
"""
The OP reverses the tensor :attr:`x` along the given :attr:`axis`.
Parameters:
x (Variable): A tensor to be reversed, its data type supports bool, float32, float64, int32, int64 and uint8.
axis (int|tuple|list): A dimension or a set of dimensions of :attr:`x` to reverse. Must be
in the range [-rank( :attr:`x` ), rank( :attr:`x` )). If it is a tuple or a list, reversing
will be apply on each axis in the tuple or list.
Returns:
Variable: The reversed tensor with the same shape and data type as :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]
"""
if isinstance(axis, int):
axis = [axis]
helper = LayerHelper("reverse", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reverse',
inputs={'X': x},
outputs={'Out': [out]},
attrs={'axis': axis})
return out
def save(x, file_path, overwrite=True):
"""
Saves a variable as a file.
Args:
x(variable): The Tensor/LoDTensor to be saved.
file_path(str): The file path where the variable will be saved.
overwrite(bool): Whether or not cover the given file when it has already
existed. If it's set 'False' and the file is existed, a runtime
error will be thrown.
"""
helper = LayerHelper("save", **locals())
helper.append_op(
type="save",
inputs={"input": x},
outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})
def save_combine(x, file_path, overwrite=True):
"""
Saves a list of variables into a single file.
Args:
x(list): A list of Tensor/LoDTensor variables to be saved together in
a single file.
file_path(str): The file path where variables will be saved.
overwrite(bool): Whether or not cover the given file when it has already
existed. If it's set 'False' and the file is existed, a runtime
error will be thrown.
Returns:
There is no return value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
v1 = fluid.layers.data(name="data",
shape=(4, 6),
dtype="float32")
            v2 = fluid.layers.data(name="data2",
shape=(6, 8, 4),
dtype="float32")
            fluid.layers.save_combine([v1, v2], file_path="output")
"""
helper = LayerHelper("save_combine", **locals())
helper.append_op(
type="save_combine",
inputs={"input": x},
outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})
def load_combine(out, file_path):
"""
Loads a list of variable from a single file.
Args:
out(list): The list of variables to be read from the disk file.
file_path(str): The path of the disk file.
"""
helper = LayerHelper("load_combine", **locals())
helper.append_op(
type="load_combine",
inputs={},
        outputs={"Out": out},
        attrs={"file_path": file_path})
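# Illustrative pairing of ``save``, ``save_combine`` and ``load_combine``
# (a hedged sketch; none of the three is exported via ``__all__``, so they are
# shown called through this module, and the file paths are hypothetical):
#
#     from paddle.fluid.layers import tensor as tensor_ops
#     v1 = fluid.layers.fill_constant(shape=[2], dtype='float32', value=1.0)
#     v2 = fluid.layers.fill_constant(shape=[3], dtype='float32', value=2.0)
#     tensor_ops.save(v1, file_path='/tmp/v1_var')
#     tensor_ops.save_combine([v1, v2], file_path='/tmp/vars')
#     tensor_ops.load_combine(out=[v1, v2], file_path='/tmp/vars')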
def has_inf(x):
"""
Test if any of x contains an infinity number
Args:
x (Variable): The Tensor/LoDTensor to be checked.
Returns:
        Variable: The tensor variable storing the output, only a bool value, indicating whether there is an infinity number in x or not.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
res = fluid.layers.has_inf(data)
"""
helper = LayerHelper("isinf", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
return out
def has_nan(x):
"""
Test if any of x contains a NAN
Args:
x (Variable): The Tensor/LoDTensor to be checked.
Returns:
        Variable: The tensor variable storing the output, only a bool value, indicating whether there is NAN in x or not.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
res = fluid.layers.has_nan(data)
"""
helper = LayerHelper("isnan", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
return out
def isfinite(x):
"""
Test if any of x contains an infinity/NAN number. If all the elements are finite,
returns true, else false.
Args:
x(variable): The Tensor/LoDTensor to be checked.
Returns:
Variable: The tensor variable storing the output, contains a bool value.
Examples:
.. code-block:: python
import paddle.fluid as fluid
var = fluid.layers.data(name="data",
shape=(4, 6),
dtype="float32")
out = fluid.layers.isfinite(var)
"""
helper = LayerHelper("isfinite", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
return out
def range(start, end, step, dtype):
"""
Return evenly spaced values within a given interval.
Values are generated within the half-open interval [start, stop) (in other words,
the interval including start but excluding stop).
Parameters:
start(float32 | float64 | int32 | int64 | Variable): Start of interval. The interval includes this value.
when start is Variable, it is a 1-D Tensor with shape [1].
end(float32 | float64 | int32 | int64 | Variable): End of interval. The interval does not include this
value, except in some cases where step is not an integer
and floating point round-off affects the length of out. When end is Variable,
it is a 1-D Tensor with shape [1].
step(float32 | float64 | int32 | int64 | Variable): Spacing between values. For any output out, this is the
distance between two adjacent values, out[i+1] - out[i].
dtype(str|core.VarDesc.VarType): the data type of the output tensor, can be float32, float64, int32, int64.
Returns: a 1-D Tensor which is evenly spaced values within a given interval. Its data type is set by dtype.
Return type: Variable
examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.range(0, 10, 2, 'int32')
"""
helper = LayerHelper("range", **locals())
check_dtype(dtype, 'create data type',
['float32', 'float64', 'int32', 'int64'], 'range')
dtype = convert_dtype(dtype)
if not isinstance(start, Variable):
start = fill_constant([1], dtype, start)
elif convert_dtype(start.dtype) != dtype:
# make sure that start, end, step has the same dtype as
# `dtype`
start = cast(x=start, dtype=dtype)
if not isinstance(end, Variable):
end = fill_constant([1], dtype, end)
elif convert_dtype(end.dtype) != dtype:
end = cast(x=end, dtype=dtype)
if not isinstance(step, Variable):
step = fill_constant([1], dtype, step)
elif convert_dtype(step.dtype) != dtype:
step = cast(x=step, dtype=dtype)
out = helper.create_variable_for_type_inference(dtype=start.dtype)
helper.append_op(
type='range',
inputs={'Start': start,
'End': end,
'Step': step},
outputs={'Out': [out]})
out.stop_gradient = True
return out
def linspace(start, stop, num, dtype):
"""
This OP return fixed number of evenly spaced values within a given interval.
Args:
start(float|Variable): The input :attr:`start` is start variable of range. It is a float scalar, \
or a tensor of shape [1] with input data type float32, float64.
        stop(float|Variable): The input :attr:`stop` is end variable of range. It is a float scalar, \
or a tensor of shape [1] with input data type float32, float64.
num(int|Variable): The input :attr:`num` is given num of the sequence. It is an int scalar, \
or a tensor of shape [1] with type int32.
dtype(string): The data type of output tensor, it could be 'float32' and 'float64'.
Returns:
Variable, the output data type will be float32, float64.: The 1-D tensor with fixed number of evenly spaced values, \
the data shape of this tensor is :math:`[num]` . If the :attr:`num` is set 1, the output tensor just has \
the value with input :attr:`start`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.linspace(0, 10, 5, 'float32') # [0.0, 2.5, 5.0, 7.5, 10.0]
data = fluid.layers.linspace(0, 10, 1, 'float32') # [0.0]
"""
helper = LayerHelper("linspace", **locals())
if not isinstance(start, Variable):
start = fill_constant([1], dtype, start)
if not isinstance(stop, Variable):
stop = fill_constant([1], dtype, stop)
if not isinstance(num, Variable):
num = fill_constant([1], 'int32', num)
out = helper.create_variable_for_type_inference(dtype=start.dtype)
helper.append_op(
type='linspace',
inputs={'Start': start,
'Stop': stop,
'Num': num},
outputs={'Out': [out]})
return out
def zeros_like(x, out=None):
"""
This OP creates a zeros tensor which has identical shape and dtype
with `x`.
Args:
x(Variable): The input tensor which specifies shape and dtype, the input data dtype could be bool, float32, float64, int32, int64.
out(Variable, optional): If is :attr:`None` , the op will create the variable as output, the data type and shape of \
this variable will be same as input :attr:`x`. If is a tensor, the data type and shape need to be same as input :attr:`x`.
The default value is :attr:`None` .
Returns:
Variable: The N-D tensor, the element in tensor is related to input data type, if the input data type is bool, \
the output value is False, otherwise is zero. The output shape is the same as the input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', dtype='float32', shape=[3])
data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]
"""
helper = LayerHelper("zeros_like", **locals())
if out is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
out.stop_gradient = True
return out
def diag(diagonal):
"""
This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.
Args:
diagonal(Variable|numpy.ndarray): The input tensor should be 1D tensor, the input shape is :math:`[ N]` , \
specifying diagonal values by this input tensor. The input data type should be float32, float64, int32, int64.
Returns:
Variable, the output data type is the same as input data type.: The tensor variable storing the square matrix, \
the diagonal values specified by input :attr:`diagonal`. the output shape is :math:`[N, N]` with two dims.
Examples:
.. code-block:: python
# [[3, 0, 0]
# [0, 4, 0]
            # [0, 0, 5]]
import paddle.fluid as fluid
import numpy as np
diagonal = np.arange(3, 6, dtype='int32')
data = fluid.layers.diag(diagonal)
# diagonal.shape=(3,) data.shape=(3, 3)
"""
helper = LayerHelper("diag", **locals())
if not isinstance(diagonal, Variable):
diagonal = assign(diagonal)
out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)
helper.append_op(
type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]})
out.stop_gradient = True
return out
def eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
"""
**eye**
This function constructs an identity tensor, or a batch of tensor.
Args:
num_rows(int): the number of rows in each batch tensor.
num_columns(int): the number of columns in each batch tensor.
If None, default: num_rows.
batch_shape(list(int)): If provided, the returned tensor will have a leading
batch size of this shape.
dtype(string): The data type of the returned tensor.
It should be int32, int64, float16, float32, float64.
Returns:
Variable: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.eye(3, dtype='int32')
# [[1, 0, 0]
# [0, 1, 0]
# [0, 0, 1]]
data = fluid.layers.eye(2, 3, dtype='int32')
# [[1, 0, 0]
# [0, 1, 0]]
data = fluid.layers.eye(2, batch_shape=[3])
# Construct a batch of 3 identity tensors, each 2 x 2.
# data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.
"""
helper = LayerHelper("eye", **locals())
if not isinstance(num_rows, int) or num_rows < 0:
raise TypeError("num_rows should be a non-negative int")
if num_columns is not None:
if not isinstance(num_columns, int) or num_columns < 0:
raise TypeError("num_columns should be a non-negative int")
else:
num_columns = num_rows
out = helper.create_variable_for_type_inference(dtype=dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='eye',
inputs={},
outputs={'Out': [out]},
attrs={
'num_rows': num_rows,
'num_columns': num_columns,
'dtype': c_dtype
},
stop_gradient=True)
out.stop_gradient = True
if batch_shape is not None:
if not isinstance(batch_shape, list):
raise TypeError("batch_shape should be a list")
from .nn import stack
for batch_val in reversed(batch_shape):
if batch_val <= 0:
raise TypeError("batch_shape should be a positive int list")
else:
stack_vars = [out for _ in numpy.arange(batch_val)]
out = stack(stack_vars, axis=0)
return out
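# Note on the ``batch_shape`` handling above: the [num_rows, num_columns]
# identity is replicated with ``stack`` once per batch dimension, iterating
# ``batch_shape`` in reverse so the innermost batch dimension is stacked first
# and the leading dimensions end up in the requested order.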
def ones_like(x, out=None):
"""
**ones_like**
This function creates a ones tensor which has identical shape and dtype
with `x`.
Args:
x(Variable): The input tensor which specifies shape and dtype.
out(Variable): The output tensor.
Returns:
out(Variable): The tensor variable storing the output.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]
"""
helper = LayerHelper("ones_like", **locals())
if out is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='fill_any_like',
inputs={'X': [x]},
attrs={'value': 1.0},
outputs={'Out': [out]})
return out
| 37.385296
| 171
| 0.563029
|
789bcf3999beabe4b78b031c82dd0da11d13971d
| 4,070
|
py
|
Python
|
testing/legion/examples/http_example/http_test.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
testing/legion/examples/http_example/http_test.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
testing/legion/examples/http_example/http_test.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import logging
import os
import socket
import sys
import time
TESTING_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..', '..')
sys.path.append(TESTING_DIR)
from legion import legion_test_case
class HttpTest(legion_test_case.TestCase):
"""Example HTTP test case."""
@classmethod
def GetArgs(cls):
"""Get command line args."""
parser = argparse.ArgumentParser()
parser.add_argument('--http-server')
parser.add_argument('--http-client')
parser.add_argument('--os', default='Ubuntu-14.04')
args, _ = parser.parse_known_args()
return args
@classmethod
def CreateTask(cls, name, task_hash, os_type):
"""Create a new task."""
#pylint: disable=unexpected-keyword-arg,no-value-for-parameter
#pylint: disable=arguments-differ
task = super(HttpTest, cls).CreateTask(
name=name,
isolated_hash=task_hash,
dimensions={'os': os_type})
task.Create()
return task
@classmethod
def setUpClass(cls):
"""Creates the task machines and waits until they connect."""
args = cls.GetArgs()
cls.http_server = cls.CreateTask(
'http_server', args.http_server, args.os)
cls.http_client = cls.CreateTask(
'http_client', args.http_client, args.os)
cls.http_server.WaitForConnection()
cls.http_client.WaitForConnection()
def CanConnectToServerPort(self, server_port):
"""Connect to a port on the http_server.
Returns:
True if the connection succeeded, False otherwise.
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.http_server.ip_address, server_port))
return True
except socket.error:
return False
def FindOpenPortOnServer(self):
"""Find an open port on the server and return it.
Returns:
The value of an open port.
"""
for server_port in xrange(2000, 20000):
if not self.CanConnectToServerPort(server_port):
return server_port
self.fail('Unable to find an open port on the server.')
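  # FindOpenPortOnServer linearly probes ports 2000-19999 and treats the first
  # port it cannot connect to as free; a race is still possible if another
  # process claims that port before StartServer() binds it.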
def StartServer(self, server_port):
"""Starts the http_server process.
Returns:
The server process.
"""
def WaitForServer():
timeout = time.time() + 5
while timeout > time.time():
if self.CanConnectToServerPort(server_port):
return
self.fail('Server process failed to start')
cmd = [
self.http_server.executable,
'http_server.py',
'--port', str(server_port)
]
proc = self.http_server.Process(cmd)
WaitForServer()
return proc
def StartClient(self, server_port):
"""Starts the http_client process.
Returns:
The client process.
"""
cmd = [
self.http_client.executable,
'http_client.py',
'--server', self.http_server.ip_address,
'--port', str(server_port)
]
return self.http_client.Process(cmd)
def testHttpWorks(self):
"""Tests that the client process can talk to the server process."""
server_proc = None
client_proc = None
try:
server_port = self.FindOpenPortOnServer()
logging.info('Starting server at %s:%s', self.http_server.ip_address,
server_port)
server_proc = self.StartServer(server_port)
logging.info('Connecting to server at %s:%s', self.http_server.ip_address,
server_port)
client_proc = self.StartClient(server_port)
client_proc.Wait()
logging.info('client_proc.stdout: %s', client_proc.ReadStdout())
logging.info('client_proc.stderr: %s', client_proc.ReadStderr())
self.assertEqual(client_proc.GetReturncode(), 0)
finally:
if server_proc:
server_proc.Kill()
server_proc.Delete()
if client_proc:
client_proc.Kill()
client_proc.Delete()
if __name__ == '__main__':
legion_test_case.main()
| 28.661972
| 80
| 0.660934
|
a975e20dea25fa8ed21cee0a6272199672e98ba7
| 20,927
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv6_acl_datatypes.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv6_acl_datatypes.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv6_acl_datatypes.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-07-22T04:04:44.000Z
|
2020-07-22T04:04:44.000Z
|
""" Cisco_IOS_XR_ipv6_acl_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Ipv6AclDscpNumber(Enum):
"""
Ipv6AclDscpNumber (Enum Class)
Ipv6 acl dscp number
.. data:: default = 0
Default DSCP
.. data:: af11 = 10
Match packets with AF11 DSCP
.. data:: af12 = 12
Match packets with AF12 DSCP
.. data:: af13 = 14
Match packets with AF13 DSCP
.. data:: af21 = 18
Match packets with AF21 DSCP
.. data:: af22 = 20
Match packets with AF22 DSCP
.. data:: af23 = 22
Match packets with AF23 DSCP
.. data:: af31 = 26
Match packets with AF31 DSCP
.. data:: af32 = 28
Match packets with AF32 DSCP
.. data:: af33 = 30
Match packets with AF33 DSCP
.. data:: af41 = 34
Match packets with AF41 DSCP
.. data:: af42 = 36
Match packets with AF42 DSCP
.. data:: af43 = 38
Match packets with AF43 DSCP
.. data:: cs1 = 8
Match packets with CS1 (precedence 1) DSCP
.. data:: cs2 = 16
Match packets with CS2 (precedence 2) DSCP
.. data:: cs3 = 24
Match packets with CS3 (precedence 3) DSCP
.. data:: cs4 = 32
Match packets with CS4 (precedence 4) DSCP
.. data:: cs5 = 40
Match packets with CS5 (precedence 5) DSCP
.. data:: cs6 = 48
Match packets with CS6 (precedence 6) DSCP
.. data:: cs7 = 56
Match packets with CS7 (precedence 7) DSCP
.. data:: ef = 46
Match packets with EF DSCP
"""
default = Enum.YLeaf(0, "default")
af11 = Enum.YLeaf(10, "af11")
af12 = Enum.YLeaf(12, "af12")
af13 = Enum.YLeaf(14, "af13")
af21 = Enum.YLeaf(18, "af21")
af22 = Enum.YLeaf(20, "af22")
af23 = Enum.YLeaf(22, "af23")
af31 = Enum.YLeaf(26, "af31")
af32 = Enum.YLeaf(28, "af32")
af33 = Enum.YLeaf(30, "af33")
af41 = Enum.YLeaf(34, "af41")
af42 = Enum.YLeaf(36, "af42")
af43 = Enum.YLeaf(38, "af43")
cs1 = Enum.YLeaf(8, "cs1")
cs2 = Enum.YLeaf(16, "cs2")
cs3 = Enum.YLeaf(24, "cs3")
cs4 = Enum.YLeaf(32, "cs4")
cs5 = Enum.YLeaf(40, "cs5")
cs6 = Enum.YLeaf(48, "cs6")
cs7 = Enum.YLeaf(56, "cs7")
ef = Enum.YLeaf(46, "ef")
class Ipv6AclGrantEnum(Enum):
"""
Ipv6AclGrantEnum (Enum Class)
Ipv6 acl grant enum
.. data:: deny = 0
Deny
.. data:: permit = 1
Permit
"""
deny = Enum.YLeaf(0, "deny")
permit = Enum.YLeaf(1, "permit")
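# Illustrative usage sketch (hedged; assumes the ydk runtime is importable):
#
#     grant = Ipv6AclGrantEnum.permit   # Enum.YLeaf with value 1, name "permit"
#     block = Ipv6AclGrantEnum.deny     # Enum.YLeaf with value 0, name "deny"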
class Ipv6AclIcmpTypeCodeEnum(Enum):
"""
Ipv6AclIcmpTypeCodeEnum (Enum Class)
Ipv6 acl icmp type code enum
.. data:: no_route_to_destination = 65536
No route to destination
.. data:: administratively_prohibited = 65537
Administratively prohibited
.. data:: beyond_scope_of_source_address = 65538
Unreachable beyond scope of address
.. data:: host_unreachable = 65539
Host unreachable
.. data:: port_unreachable = 65540
Port unreachable
.. data:: unreachable = 131071
All unreachables
.. data:: packet_too_big = 131072
packet too big
.. data:: ttl_exceeded = 196608
TTL exceeded
.. data:: reassembly_timeout = 196609
Reassembly timeout
.. data:: time_exceeded = 262143
All time exceeds
.. data:: erronenous_header_field = 262144
Erroneous header field
.. data:: option_missing = 262145
Parameter required but not present
.. data:: no_room_for_option = 262146
Parameter required but no room
.. data:: parameter_problem = 327679
All parameter problems
.. data:: echo = 8388608
Echo ping
.. data:: echo_reply = 8454144
Echo reply
.. data:: group_membership_query = 8585215
Multicast listener query
.. data:: group_membership_report = 8650751
Multicast listener report
.. data:: group_membership_reduction = 8716287
Multicast listener done
.. data:: router_solicitation = 8716288
Router discovery solicitations
.. data:: router_advertisement = 8781824
Router discovery advertisements
.. data:: neighbor_solicitation = 8847360
Neighbor discovery neighbor solicitations
.. data:: neighbor_advertisement = 8912896
Neighbor discovery neighbor advertisements
.. data:: redirect = 8978432
All redirects
.. data:: rr_command = 9043968
Router renumbering command
.. data:: rr_result = 9043969
Router renumbering result
.. data:: rr_seqnum_reset = 9044223
Router renumbering seqnum
.. data:: router_renumbering = 9109503
Router renumbering
.. data:: query_subject_is_ipv6_address = 9109504
Query subject is ipv6 address
.. data:: query_subject_is_domain_name = 9109505
Query subject is domain name
.. data:: query_subject_is_ipv4_address = 9109506
Query subject is ipv4 address
.. data:: who_are_you_request = 9175039
Who are you request
.. data:: node_information_successful_reply = 9175040
Node information successful reply
.. data:: node_information_request_is_refused = 9175041
Node information reply rejected
.. data:: unknown_query_type = 9175042
Unknown query type
.. data:: who_are_you_reply = 9240575
Who are you reply
"""
no_route_to_destination = Enum.YLeaf(65536, "no-route-to-destination")
administratively_prohibited = Enum.YLeaf(65537, "administratively-prohibited")
beyond_scope_of_source_address = Enum.YLeaf(65538, "beyond-scope-of-source-address")
host_unreachable = Enum.YLeaf(65539, "host-unreachable")
port_unreachable = Enum.YLeaf(65540, "port-unreachable")
unreachable = Enum.YLeaf(131071, "unreachable")
packet_too_big = Enum.YLeaf(131072, "packet-too-big")
ttl_exceeded = Enum.YLeaf(196608, "ttl-exceeded")
reassembly_timeout = Enum.YLeaf(196609, "reassembly-timeout")
time_exceeded = Enum.YLeaf(262143, "time-exceeded")
erronenous_header_field = Enum.YLeaf(262144, "erronenous-header-field")
option_missing = Enum.YLeaf(262145, "option-missing")
no_room_for_option = Enum.YLeaf(262146, "no-room-for-option")
parameter_problem = Enum.YLeaf(327679, "parameter-problem")
echo = Enum.YLeaf(8388608, "echo")
echo_reply = Enum.YLeaf(8454144, "echo-reply")
group_membership_query = Enum.YLeaf(8585215, "group-membership-query")
group_membership_report = Enum.YLeaf(8650751, "group-membership-report")
group_membership_reduction = Enum.YLeaf(8716287, "group-membership-reduction")
router_solicitation = Enum.YLeaf(8716288, "router-solicitation")
router_advertisement = Enum.YLeaf(8781824, "router-advertisement")
neighbor_solicitation = Enum.YLeaf(8847360, "neighbor-solicitation")
neighbor_advertisement = Enum.YLeaf(8912896, "neighbor-advertisement")
redirect = Enum.YLeaf(8978432, "redirect")
rr_command = Enum.YLeaf(9043968, "rr-command")
rr_result = Enum.YLeaf(9043969, "rr-result")
rr_seqnum_reset = Enum.YLeaf(9044223, "rr-seqnum-reset")
router_renumbering = Enum.YLeaf(9109503, "router-renumbering")
query_subject_is_ipv6_address = Enum.YLeaf(9109504, "query-subject-is-ipv6-address")
query_subject_is_domain_name = Enum.YLeaf(9109505, "query-subject-is-domain-name")
query_subject_is_ipv4_address = Enum.YLeaf(9109506, "query-subject-is-ipv4-address")
who_are_you_request = Enum.YLeaf(9175039, "who-are-you-request")
node_information_successful_reply = Enum.YLeaf(9175040, "node-information-successful-reply")
node_information_request_is_refused = Enum.YLeaf(9175041, "node-information-request-is-refused")
unknown_query_type = Enum.YLeaf(9175042, "unknown-query-type")
who_are_you_reply = Enum.YLeaf(9240575, "who-are-you-reply")
class Ipv6AclLoggingEnum(Enum):
"""
Ipv6AclLoggingEnum (Enum Class)
Ipv6 acl logging enum
.. data:: log = 1
Log matches against this entry
.. data:: log_input = 2
Log matches against this entry, including input
interface
"""
log = Enum.YLeaf(1, "log")
log_input = Enum.YLeaf(2, "log-input")
class Ipv6AclOperatorEnum(Enum):
"""
Ipv6AclOperatorEnum (Enum Class)
Ipv6 acl operator enum
.. data:: equal = 1
Match only packets on a given port number
.. data:: greater_than = 2
Match only packet with a greater port number
.. data:: less_than = 3
Match only packet with a lower port number
.. data:: not_equal = 4
Match only packets not on a given port number
.. data:: range = 5
Match only packets in the range of port numbers
"""
equal = Enum.YLeaf(1, "equal")
greater_than = Enum.YLeaf(2, "greater-than")
less_than = Enum.YLeaf(3, "less-than")
not_equal = Enum.YLeaf(4, "not-equal")
range = Enum.YLeaf(5, "range")
class Ipv6AclPortNumber(Enum):
"""
Ipv6AclPortNumber (Enum Class)
Ipv6 acl port number
.. data:: echo = 7
Match on the 'echo' port number
.. data:: discard = 9
Match on the 'discard' port number
.. data:: daytime = 13
Match on the 'daytime' port number (TCP/SCTP
only)
.. data:: char_gen = 19
Match on the 'chargen' port number (TCP/SCTP
only)
.. data:: ftp_data = 20
Match on the FTP data connections port number
(TCP/SCTP only)
.. data:: ftp = 21
Match on the 'ftp' port number (TCP/SCTP only)
.. data:: telnet = 23
Match on the 'telnet' port number (TCP/SCTP
only)
.. data:: smtp = 25
Match on the 'smtp' port number (TCP/SCTP
only)
.. data:: time = 37
Match on the 'time' port number
.. data:: name_server = 42
Match on the IEN116 name service port number
(UDP only)
.. data:: who_is = 43
Match on the 'nicname' port number (TCP/SCTP
only)
.. data:: tacacs = 49
Match on the 'tacacs' port number
.. data:: dns = 53
Match on the 'dns' port number
.. data:: boot_ps = 67
Match on the Bootstrap Protocol server port
number (UDP only)
.. data:: boot_pc = 68
Match on the Bootstrap Protocol client port
number (UDP only)
.. data:: tftp = 69
Match on the 'tftp' port number (UDP only)
.. data:: gopher = 70
Match on the 'gopher' port number (TCP/SCTP
only)
.. data:: finger = 79
Match on the 'finger' port number (TCP/SCTP
only)
.. data:: www = 80
Match on the 'http' port number (TCP/SCTP
only)
.. data:: host_name = 101
Match on the NIC hostname server port number
(TCP/SCTP only)
.. data:: pop2 = 109
Match on the 'pop2' port number (TCP/SCTP
only)
.. data:: pop3 = 110
Match on the 'pop3' port number (TCP/SCTP
only)
.. data:: sun_rpc = 111
Match on the Sun RPC port number
.. data:: ident = 113
Match on the 'ident' port number (TCP/SCTP
only)
.. data:: nntp = 119
Match on the 'nntp' port number (TCP/SCTP
only)
.. data:: ntp = 123
Match on the 'ntp' port number (UDP only)
.. data:: net_bios_ns = 137
Match on the NetBIOS name service port number
(UDP only)
.. data:: net_bios_dgs = 138
Match on the NetBIOS datagram service port
number (UDP only)
.. data:: net_bios_ss = 139
Match on the NetBIOS session service port
number (UDP only)
.. data:: snmp = 161
Match on the 'snmp' port number (UDP only)
.. data:: snmp_trap = 162
Match on the SNMP traps port number (UDP only)
.. data:: xdmcp = 177
Match on the 'xdmcp' port number (UDP only)
.. data:: bgp = 179
Match on the 'bgp' port number (TCP/SCTP only)
.. data:: irc = 194
Match on the 'irc' port number (TCP/SCTP only)
.. data:: dnsix = 195
Match on the DNSIX security protocol auditing
port number (UDP only)
.. data:: mobile_ip = 434
Match on the mobile IP registration port
number (UDP only)
.. data:: pim_auto_rp = 496
Match on the PIM Auto-RP port number
.. data:: isakmp = 500
Match on the 'isakmp' port number (UDP only)
.. data:: exec_or_biff = 512
Match on the port used by TCP/SCTP for 'exec'
and by UDP for 'biff'
.. data:: login_or_who = 513
Match on the port used by TCP/SCTP for 'login'
and by UDP for 'rwho'
.. data:: cmd_or_syslog = 514
Match on the port used by TCP/SCTP for 'rcmd'
and by UDP for 'syslog'
.. data:: lpd = 515
Match on the 'lpd' port number (TCP/SCTP only)
.. data:: talk = 517
Match on the 'talk' port number
.. data:: rip = 520
Match on the 'rip' port number (UDP only)
.. data:: uucp = 540
Match on the 'uucp' port number (TCP/SCTP
only)
.. data:: klogin = 543
Match on the Kerberos login port number
(TCP/SCTP only)
.. data:: kshell = 544
Match on the Kerberos shell port number
(TCP/SCTP only)
.. data:: ldp = 646
Match on the LDP port
"""
echo = Enum.YLeaf(7, "echo")
discard = Enum.YLeaf(9, "discard")
daytime = Enum.YLeaf(13, "daytime")
char_gen = Enum.YLeaf(19, "char-gen")
ftp_data = Enum.YLeaf(20, "ftp-data")
ftp = Enum.YLeaf(21, "ftp")
telnet = Enum.YLeaf(23, "telnet")
smtp = Enum.YLeaf(25, "smtp")
time = Enum.YLeaf(37, "time")
name_server = Enum.YLeaf(42, "name-server")
who_is = Enum.YLeaf(43, "who-is")
tacacs = Enum.YLeaf(49, "tacacs")
dns = Enum.YLeaf(53, "dns")
boot_ps = Enum.YLeaf(67, "boot-ps")
boot_pc = Enum.YLeaf(68, "boot-pc")
tftp = Enum.YLeaf(69, "tftp")
gopher = Enum.YLeaf(70, "gopher")
finger = Enum.YLeaf(79, "finger")
www = Enum.YLeaf(80, "www")
host_name = Enum.YLeaf(101, "host-name")
pop2 = Enum.YLeaf(109, "pop2")
pop3 = Enum.YLeaf(110, "pop3")
sun_rpc = Enum.YLeaf(111, "sun-rpc")
ident = Enum.YLeaf(113, "ident")
nntp = Enum.YLeaf(119, "nntp")
ntp = Enum.YLeaf(123, "ntp")
net_bios_ns = Enum.YLeaf(137, "net-bios-ns")
net_bios_dgs = Enum.YLeaf(138, "net-bios-dgs")
net_bios_ss = Enum.YLeaf(139, "net-bios-ss")
snmp = Enum.YLeaf(161, "snmp")
snmp_trap = Enum.YLeaf(162, "snmp-trap")
xdmcp = Enum.YLeaf(177, "xdmcp")
bgp = Enum.YLeaf(179, "bgp")
irc = Enum.YLeaf(194, "irc")
dnsix = Enum.YLeaf(195, "dnsix")
mobile_ip = Enum.YLeaf(434, "mobile-ip")
pim_auto_rp = Enum.YLeaf(496, "pim-auto-rp")
isakmp = Enum.YLeaf(500, "isakmp")
exec_or_biff = Enum.YLeaf(512, "exec-or-biff")
login_or_who = Enum.YLeaf(513, "login-or-who")
cmd_or_syslog = Enum.YLeaf(514, "cmd-or-syslog")
lpd = Enum.YLeaf(515, "lpd")
talk = Enum.YLeaf(517, "talk")
rip = Enum.YLeaf(520, "rip")
uucp = Enum.YLeaf(540, "uucp")
klogin = Enum.YLeaf(543, "klogin")
kshell = Enum.YLeaf(544, "kshell")
ldp = Enum.YLeaf(646, "ldp")
class Ipv6AclPrecedenceNumber(Enum):
"""
Ipv6AclPrecedenceNumber (Enum Class)
Ipv6 acl precedence number
.. data:: critical = 5
Match packets with critical precedence
.. data:: flash = 3
Match packets with flash precedence
.. data:: flash_override = 4
Match packets with flash override precedence
.. data:: immediate = 2
Match packets with immediate precedence
.. data:: internet = 6
Match packets with internetwork control
precedence
.. data:: network = 7
Match packets with network control precedence
.. data:: priority = 1
Match packets with priority precedence
.. data:: routine = 0
Match packets with routine precedence
"""
critical = Enum.YLeaf(5, "critical")
flash = Enum.YLeaf(3, "flash")
flash_override = Enum.YLeaf(4, "flash-override")
immediate = Enum.YLeaf(2, "immediate")
internet = Enum.YLeaf(6, "internet")
network = Enum.YLeaf(7, "network")
priority = Enum.YLeaf(1, "priority")
routine = Enum.YLeaf(0, "routine")
class Ipv6AclProtocolNumber(Enum):
"""
Ipv6AclProtocolNumber (Enum Class)
Ipv6 acl protocol number
.. data:: ip = 0
Any IP protocol
.. data:: icmp = 1
Internet Control Message Protocol
.. data:: igmp = 2
Internet Gateway Message Protocol
.. data:: ip_in_ip = 4
IP in IP tunneling
.. data:: tcp = 6
Transport Control Protocol
.. data:: igrp = 9
Cisco's IGRP Routing Protocol
.. data:: udp = 17
User Datagram Protocol
.. data:: gre = 47
Cisco's GRE tunneling
.. data:: esp = 50
Encapsulation Security Protocol
.. data:: ahp = 51
Authentication Header Protocol
.. data:: icmpv6 = 58
Internet Control Message Protocol
.. data:: eigrp = 88
Cisco's EIGRP Routing Protocol
.. data:: ospf = 89
OSPF Routing Protocol
.. data:: nos = 94
KA9Q NOS Compatible IP over IP tunneling
.. data:: pim = 103
Protocol Independent Multicast
.. data:: pcp = 108
Payload Compression Protocol
.. data:: sctp = 132
Stream Control Transmission Protocol
"""
ip = Enum.YLeaf(0, "ip")
icmp = Enum.YLeaf(1, "icmp")
igmp = Enum.YLeaf(2, "igmp")
ip_in_ip = Enum.YLeaf(4, "ip-in-ip")
tcp = Enum.YLeaf(6, "tcp")
igrp = Enum.YLeaf(9, "igrp")
udp = Enum.YLeaf(17, "udp")
gre = Enum.YLeaf(47, "gre")
esp = Enum.YLeaf(50, "esp")
ahp = Enum.YLeaf(51, "ahp")
icmpv6 = Enum.YLeaf(58, "icmpv6")
eigrp = Enum.YLeaf(88, "eigrp")
ospf = Enum.YLeaf(89, "ospf")
nos = Enum.YLeaf(94, "nos")
pim = Enum.YLeaf(103, "pim")
pcp = Enum.YLeaf(108, "pcp")
sctp = Enum.YLeaf(132, "sctp")
class Ipv6AclStatusEnum(Enum):
"""
Ipv6AclStatusEnum (Enum Class)
Ipv6 acl status enum
.. data:: disabled = 0
Disabled
.. data:: enabled = 1
Enabled
"""
disabled = Enum.YLeaf(0, "disabled")
enabled = Enum.YLeaf(1, "enabled")
class Ipv6AclTcpBitsNumber(Enum):
"""
Ipv6AclTcpBitsNumber (Enum Class)
Ipv6 acl tcp bits number
.. data:: established = 20
Match established connections (0x14)
.. data:: ack = 16
Match on the ACK bit (0x10)
.. data:: rst = 4
Match on the RST bit (0x04)
.. data:: fin = 1
Match on the FIN bit (0x01)
.. data:: psh = 8
Match on the PSH bit (0x08)
.. data:: syn = 2
Match on the SYN bit (0x02)
.. data:: urg = 32
Match on the URG bit (0x20)
"""
established = Enum.YLeaf(20, "established")
ack = Enum.YLeaf(16, "ack")
rst = Enum.YLeaf(4, "rst")
fin = Enum.YLeaf(1, "fin")
psh = Enum.YLeaf(8, "psh")
syn = Enum.YLeaf(2, "syn")
urg = Enum.YLeaf(32, "urg")
class Ipv6AclTcpMatchOperatorEnum(Enum):
"""
Ipv6AclTcpMatchOperatorEnum (Enum Class)
Ipv6 acl tcp match operator enum
.. data:: match_all = 1
Match only packet with all the given TCP bits
.. data:: match_any = 3
Match only packet with any of the given TCP
bits
"""
match_all = Enum.YLeaf(1, "match-all")
match_any = Enum.YLeaf(3, "match-any")
class Ipv6AclTypeEnum(Enum):
"""
Ipv6AclTypeEnum (Enum Class)
Ipv6 acl type enum
.. data:: acl = 1
ACL
.. data:: prefix_list = 2
Prefix List
"""
acl = Enum.YLeaf(1, "acl")
prefix_list = Enum.YLeaf(2, "prefix-list")
class Ipv6PrefixMatchExactLength(Enum):
"""
Ipv6PrefixMatchExactLength (Enum Class)
Ipv6 prefix match exact length
.. data:: match_exact_length = 1
Prefix Length Exact match
"""
match_exact_length = Enum.YLeaf(1, "match-exact-length")
class Ipv6PrefixMatchMaxLength(Enum):
"""
Ipv6PrefixMatchMaxLength (Enum Class)
Ipv6 prefix match max length
.. data:: match_max_length = 3
Enable matching of Prefix Lengths lesser than
MaxPrefixLength
"""
match_max_length = Enum.YLeaf(3, "match-max-length")
class Ipv6PrefixMatchMinLength(Enum):
"""
Ipv6PrefixMatchMinLength (Enum Class)
Ipv6 prefix match min length
.. data:: match_min_length = 2
Enable matching of Prefix Lengths greater than
MinPrefixLength
"""
match_min_length = Enum.YLeaf(2, "match-min-length")
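# ---------------------------------------------------------------------------
# Added note (illustration only, not part of the generated YANG bindings):
# the numeric values of the ICMPv6 message enums above appear to pack the
# ICMPv6 type into the upper 16 bits and the code into the lower 16 bits,
# with 0xFFFF acting as a "match any code" sentinel. For example
# 65536 == 0x10000 is type 1 / code 0 ("no route to destination") and
# 131071 == 0x1FFFF covers every type 1 unreachable. A minimal decoder sketch:
def _decode_icmpv6_enum(value):
    """Split a packed enum value into (icmp_type, icmp_code or None)."""
    icmp_type = value >> 16
    icmp_code = value & 0xFFFF
    return icmp_type, (None if icmp_code == 0xFFFF else icmp_code)
# Example: _decode_icmpv6_enum(65539) -> (1, 3), i.e. type 1, code 3,
# matching the host_unreachable entry above.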
| 18.197391 | 126 | 0.624361 |
0d8a4340868fe6ed30637cf1e8c52f187b8b49cc | 15,036 | py | Python | modoboa/core/models.py | Arvedui/modoboa | 404f4bc0f544506d23ac534ff02ae6c6e3b6558c | ["ISC"] | null | null | null | modoboa/core/models.py | Arvedui/modoboa | 404f4bc0f544506d23ac534ff02ae6c6e3b6558c | ["ISC"] | null | null | null | modoboa/core/models.py | Arvedui/modoboa | 404f4bc0f544506d23ac534ff02ae6c6e3b6558c | ["ISC"] | null | null | null |
# -*- coding: utf-8 -*-
"""Core models."""
from __future__ import unicode_literals
import re
from email.header import Header
import jsonfield
from reversion import revisions as reversion
from django.conf import settings
from django.contrib.auth.models import AbstractUser, Group
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.urls import reverse
from django.utils.encoding import (
force_str, python_2_unicode_compatible, smart_bytes, smart_text
)
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.core.password_hashers import get_password_hasher
from modoboa.lib.exceptions import (
BadRequest, Conflict, InternalError, PermDeniedException
)
from modoboa.parameters import tools as param_tools
from . import constants, signals
try:
from modoboa.lib.ldap_utils import LDAPAuthBackend
ldap_available = True
except ImportError:
ldap_available = False
@python_2_unicode_compatible
class User(AbstractUser):
"""Custom User model.
It overloads the way passwords are stored into the database. The
main reason to change this mechanism is to ensure the
compatibility with the way Dovecot stores passwords.
It also adds new attributes and methods.
"""
username = models.CharField(max_length=254, unique=True)
email = models.EmailField(max_length=254, blank=True, db_index=True)
is_staff = models.BooleanField(default=False, db_index=True)
is_active = models.BooleanField(default=True, db_index=True)
is_local = models.BooleanField(default=True, db_index=True)
master_user = models.BooleanField(
ugettext_lazy("Allow mailboxes access"), default=False,
help_text=ugettext_lazy(
"Allow this administrator to access user mailboxes"
)
)
password = models.CharField(ugettext_lazy("password"), max_length=256)
language = models.CharField(
ugettext_lazy("language"),
max_length=10, default="en", choices=constants.LANGUAGES,
help_text=ugettext_lazy(
"Prefered language to display pages."
)
)
phone_number = models.CharField(
ugettext_lazy("Phone number"), max_length=128, blank=True, null=True)
secondary_email = models.EmailField(
ugettext_lazy("Secondary email"), max_length=254,
blank=True, null=True,
help_text=ugettext_lazy(
"An alternative e-mail address, can be used for recovery needs.")
)
_parameters = jsonfield.JSONField(default={})
class Meta(object):
ordering = ["username"]
index_together = [
["email", "is_active"]
]
password_expr = re.compile(r'\{([\w\-]+)\}(.+)')
def __init__(self, *args, **kwargs):
"""Load parameter manager."""
super(User, self).__init__(*args, **kwargs)
self.parameters = param_tools.Manager("user", self._parameters)
def _crypt_password(self, raw_value):
"""Crypt the local password using the appropriate scheme.
In case we don't find the scheme (for example when the
management framework is used), we load the parameters and try
one more time.
"""
scheme = param_tools.get_global_parameter(
"password_scheme", raise_exception=False)
if scheme is None:
from modoboa.core.apps import load_core_settings
load_core_settings()
scheme = param_tools.get_global_parameter(
"password_scheme", raise_exception=False)
raw_value = smart_bytes(raw_value)
return get_password_hasher(scheme.upper())().encrypt(raw_value)
def set_password(self, raw_value, curvalue=None):
"""Password update
Update the current mailbox's password with the given clear
value. This value is encrypted according to the defined method
before it is saved.
:param raw_value: the new password's value
:param curvalue: the current password (for LDAP authentication)
"""
if self.is_local:
self.password = self._crypt_password(raw_value)
else:
if not ldap_available:
raise InternalError(
_("Failed to update password: LDAP module not installed")
)
LDAPAuthBackend().update_user_password(
self.username, curvalue, raw_value
)
signals.account_password_updated.send(
sender=self.__class__,
account=self, password=raw_value, created=self.pk is None)
def check_password(self, raw_value):
"""Compare raw_value to current password."""
match = self.password_expr.match(self.password)
if match is None:
return False
raw_value = force_str(raw_value)
scheme = match.group(1)
val2 = match.group(2)
hasher = get_password_hasher(scheme)
return hasher().verify(raw_value, val2)
def __str__(self):
return smart_text(self.get_username())
def get_absolute_url(self):
"""Return detail url for this user."""
return reverse("admin:account_detail", args=[self.pk])
@property
def tags(self):
return [{"name": "account", "label": _("account"), "type": "idt"},
{"name": self.role, "label": self.role,
"type": "grp", "color": "info"}]
@property
def fullname(self):
result = self.username
if self.first_name != "":
result = self.first_name
if self.last_name != "":
if result != "":
result += " "
result += self.last_name
return result
@property
def identity(self):
return self.username
@property
def name_or_rcpt(self):
if self.first_name != "":
return "%s %s" % (self.first_name, self.last_name)
return "----"
@property
def enabled(self):
return self.is_active
@property
def encoded_address(self):
if self.first_name != "" or self.last_name != "":
return '"{}" <{}>'.format(
Header(self.fullname, "utf8").encode(), self.email)
return self.email
def is_owner(self, obj):
"""Tell is the user is the unique owner of this object
:param obj: an object inheriting from ``models.Model``
:return: a boolean
"""
ct = ContentType.objects.get_for_model(obj)
try:
ooentry = self.objectaccess_set.get(
content_type=ct, object_id=obj.id)
except ObjectAccess.DoesNotExist:
return False
return ooentry.is_owner
def can_access(self, obj):
"""Check if the user can access a specific object
This function is recursive: if the given user hasn't got
direct access to this object and if he has got access to other
``User`` objects, we check if one of those users owns the
object.
        :param obj: an admin object
:return: a boolean
"""
if self.is_superuser:
return True
ct = ContentType.objects.get_for_model(obj)
try:
ooentry = self.objectaccess_set.get(
content_type=ct, object_id=obj.id)
except ObjectAccess.DoesNotExist:
pass
else:
return True
if ct.model == "user":
return False
ct = ContentType.objects.get_for_model(self)
qs = self.objectaccess_set.filter(content_type=ct)
for ooentry in qs.all():
if ooentry.content_object.is_owner(obj):
return True
return False
@property
def role(self):
"""Return user role."""
if not hasattr(self, "_role"):
if self.is_superuser:
self._role = "SuperAdmins"
else:
try:
self._role = self.groups.all()[0].name
except IndexError:
self._role = "---"
return self._role
@role.setter
def role(self, role):
"""Set administrative role for this account
:param string role: the role to set
"""
if role is None or self.role == role:
return
signals.account_role_changed.send(
sender=self.__class__, account=self, role=role)
self.groups.clear()
if role == "SuperAdmins":
self.is_superuser = True
else:
if self.is_superuser:
ObjectAccess.objects.filter(user=self).delete()
self.is_superuser = False
try:
self.groups.add(Group.objects.get(name=role))
except Group.DoesNotExist:
self.groups.add(Group.objects.get(name="SimpleUsers"))
if role != "SimpleUsers" and not self.can_access(self):
from modoboa.lib.permissions import grant_access_to_object
grant_access_to_object(self, self)
self.save()
self._role = role
def get_role_display(self):
"""Return the display name of this role."""
for role in constants.ROLES:
if role[0] == self.role:
return role[1]
return _("Unknown")
@cached_property
def is_admin(self):
"""Shortcut to check if user is administrator."""
return self.role in constants.ADMIN_GROUPS
def post_create(self, creator):
"""Grant permission on this user to creator."""
from modoboa.lib.permissions import grant_access_to_object
grant_access_to_object(creator, self, is_owner=True)
def save(self, *args, **kwargs):
creator = kwargs.pop("creator", None)
super(User, self).save(*args, **kwargs)
if creator is not None:
self.post_create(creator)
def from_csv(self, user, row, crypt_password=True):
"""Create a new account from a CSV file entry.
The expected order is the following::
"account", loginname, password, first name, last name, enabled, role
Additional fields can be added using the *account_imported* signal.
:param user: a ``core.User`` instance
:param row: a list containing the expected information
        :param crypt_password: whether the raw password must be hashed before being stored
"""
from modoboa.lib.permissions import get_account_roles
if len(row) < 7:
raise BadRequest(_("Invalid line"))
desired_role = row[6].strip()
if not user.is_superuser:
allowed_roles = get_account_roles(user)
allowed_roles = [role[0] for role in allowed_roles]
if desired_role not in allowed_roles:
raise PermDeniedException(_(
"You can't import an account with a role greater than "
"yours"
))
self.username = row[1].strip().lower()
try:
User.objects.get(username=self.username)
except User.DoesNotExist:
pass
else:
raise Conflict
if desired_role == "SimpleUsers":
if len(row) < 8 or not row[7].strip():
raise BadRequest(
_("The simple user '%s' must have a valid email address"
% self.username)
)
if self.username != row[7].strip():
raise BadRequest(
_("username and email fields must not differ for '%s'"
% self.username)
)
if crypt_password:
self.set_password(row[2].strip())
else:
self.password = row[2].strip()
self.first_name = row[3].strip()
self.last_name = row[4].strip()
self.is_active = (row[5].strip().lower() in ["true", "1", "yes", "y"])
self.language = settings.LANGUAGE_CODE
self.save()
self.role = desired_role
self.post_create(user)
if len(row) < 8:
return
signals.account_imported.send(
sender=self.__class__, user=user, account=self, row=row[7:])
def to_csv(self, csvwriter):
"""Export this account.
The CSV format is used to export.
:param csvwriter: csv object
"""
row = [
"account",
smart_text(self.username),
smart_text(self.password),
smart_text(self.first_name),
smart_text(self.last_name),
smart_text(self.is_active),
smart_text(self.role),
smart_text(self.email)
]
results = signals.account_exported.send(
sender=self.__class__, user=self)
for result in results:
row += result[1]
csvwriter.writerow(row)
reversion.register(User)
def populate_callback(user, group="SimpleUsers"):
"""Populate callback
If the LDAP authentication backend is in use, this callback will
    be called each time a new user authenticates successfully to
Modoboa. This function is in charge of creating the mailbox
associated to the provided ``User`` object.
:param user: a ``User`` instance
"""
from modoboa.lib.permissions import grant_access_to_object
sadmins = User.objects.filter(is_superuser=True)
user.role = group
user.post_create(sadmins[0])
for su in sadmins[1:]:
grant_access_to_object(su, user)
signals.account_auto_created.send(
sender="populate_callback", user=user)
@python_2_unicode_compatible
class ObjectAccess(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
is_owner = models.BooleanField(default=False)
class Meta(object):
unique_together = (("user", "content_type", "object_id"),)
def __str__(self):
return "%s => %s (%s)" % (
self.user, self.content_object, self.content_type
)
class Log(models.Model):
"""Simple log in database."""
date_created = models.DateTimeField(auto_now_add=True)
message = models.CharField(max_length=255)
level = models.CharField(max_length=15)
logger = models.CharField(max_length=30)
class LocalConfig(models.Model):
"""Store instance configuration here."""
api_pk = models.PositiveIntegerField(null=True)
site = models.ForeignKey("sites.Site", on_delete=models.CASCADE)
# API results cache
api_versions = jsonfield.JSONField()
_parameters = jsonfield.JSONField(default={})
def __init__(self, *args, **kwargs):
"""Load parameter manager."""
super(LocalConfig, self).__init__(*args, **kwargs)
self.parameters = param_tools.Manager("global", self._parameters)
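# ---------------------------------------------------------------------------
# Added illustration (not part of modoboa): User.check_password() above
# expects passwords stored in the Dovecot-compatible "{SCHEME}digest" form.
# The helper below is a standalone sketch of that parsing step, using the
# same regular expression as User.password_expr; the sample value in the
# comment is hypothetical.
def _split_dovecot_password(stored_password):
    """Return (scheme, digest) for a '{SCHEME}digest' string, or None."""
    match = re.compile(r'\{([\w\-]+)\}(.+)').match(stored_password)
    if match is None:
        return None
    # e.g. "{PLAIN}secret" -> ("PLAIN", "secret"); the scheme name is mapped
    # to a hasher through get_password_hasher() and the digest is verified
    # against the user-supplied clear-text value.
    return match.group(1), match.group(2)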
| 33.118943 | 78 | 0.618183 |
a96c26dc03007de2017b22a3ffa3914a7819ed92 | 1,059 | py | Python | 5.1 Newton otimizacao nao linear irrestrito.py | HigorAnjos/Fundamentos-VI | e0aa3cb37e4c54d24ac7123ea3bd8038196e0edb | ["MIT"] | null | null | null | 5.1 Newton otimizacao nao linear irrestrito.py | HigorAnjos/Fundamentos-VI | e0aa3cb37e4c54d24ac7123ea3bd8038196e0edb | ["MIT"] | null | null | null | 5.1 Newton otimizacao nao linear irrestrito.py | HigorAnjos/Fundamentos-VI | e0aa3cb37e4c54d24ac7123ea3bd8038196e0edb | ["MIT"] | null | null | null |
from FuncoesCauchyNewton import *
from sympy import *
import numpy as np
x = Symbol('x')
y = Symbol('y')
ponto_x = Matrix([1, 0])
f = 1/2*(x - 2)**2 + (y - 1)**2
constante = 1
Matdiff = Matrix([[diff(f.subs(y, constante))], [diff(f.subs(x, constante))]]) # vetor derivada primeira
d = - Matdiff.subs(x, ponto_x[0]).subs(y, ponto_x[1])
Matdiffseg = Matrix([[diff(diff(f.subs(y, constante)).subs(y, constante)), 0], [0, diff(diff(f.subs(x, constante)).subs(x, constante))]])
mat = Matdiffseg.subs(x, ponto_x[0]).subs(y, ponto_x[1])
dk = mat.LUsolve(d)
t = aureat(ponto_x, dk, f)
ponto_x = ponto_x + t * dk
k = 1
valida = Matdiff.subs(x, ponto_x[0]).subs(y, ponto_x[1])
while valida.norm() >= Epsilon():
d = - Matdiff.subs(x, ponto_x[0]).subs(y, ponto_x[1])
mat = Matdiffseg.subs(x, ponto_x[0]).subs(y, ponto_x[1])
dk = mat.LUsolve(d)
t = aureat(ponto_x, dk, f)
ponto_x = ponto_x + t * dk
k+=1
valida = Matdiff.subs(x, ponto_x[0]).subs(y, ponto_x[1])
print(f'Newton Min = ({ponto_x[0]}, {ponto_x[1]}) iteracao = {k}')
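# ---------------------------------------------------------------------------
# Added sanity-check sketch (not part of the original script): for this
# quadratic objective f(x, y) = 1/2*(x - 2)**2 + (y - 1)**2 the Newton step
#     x_{k+1} = x_k - H^(-1) * grad f(x_k)
# reaches the minimiser (2, 1) in a single iteration, because the Hessian
# H = [[1, 0], [0, 2]] is constant and the quadratic model is exact.
def _newton_step_quadratic(xk):
    grad = np.array([xk[0] - 2.0, 2.0 * (xk[1] - 1.0)])  # gradient of f
    hess = np.array([[1.0, 0.0], [0.0, 2.0]])            # constant Hessian
    return xk - np.linalg.solve(hess, grad)
# _newton_step_quadratic(np.array([1.0, 0.0])) returns array([2., 1.]).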
| 24.627907 | 137 | 0.620397 |
76b1aad19ca5850515d3525d0a82f62145059d08 | 14,200 | py | Python | userbot/plugins/so_IQ.py | TeleOniOn/telethon-iraq | a247d2a44c547a2a98befdc4d7be93d55a077f70 | ["Apache-2.0"] | null | null | null | userbot/plugins/so_IQ.py | TeleOniOn/telethon-iraq | a247d2a44c547a2a98befdc4d7be93d55a077f70 | ["Apache-2.0"] | null | null | null | userbot/plugins/so_IQ.py | TeleOniOn/telethon-iraq | a247d2a44c547a2a98befdc4d7be93d55a077f70 | ["Apache-2.0"] | null | null | null |
# credits to @TeleOniOn
from telethon import events
import asyncio
from userbot.utils import admin_cmd
from userbot import ALIVE_NAME
import random, re
from userbot import CMD_HELP
from collections import deque
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "cat"
@borg.on(admin_cmd(pattern=f"loading$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 20)
animation_chars = [
"▮",
"▯",
"▬",
"▭",
""
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@borg.on(admin_cmd(pattern=f"square$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 20)
animation_chars = [
"◧",
"◨",
"◧",
"◨",
""
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@borg.on(admin_cmd(pattern=f"up$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 20)
animation_chars = [
"╹",
"╻",
"╹",
"╻",
""
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@borg.on(admin_cmd(pattern=f"moni$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 20)
animation_chars = [
"⚫",
"⬤",
"●",
"∘",
""
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@borg.on(admin_cmd(pattern=f"hart$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.5
animation_ttl = range(0, 20)
animation_chars = [
"🖤",
"❤️",
"🖤",
"❤️",
""
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@borg.on(admin_cmd(pattern=f"anim$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 11)
animation_chars = [
"😁",
"😧",
"😡",
"😢",
"**Repo of catuserbot**",
"😁",
"😧",
"😡",
"😢",
"[](github.com/sandy1709/catuserbot)",
"__**Good to See you guys....**__"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
@borg.on(admin_cmd(pattern=f"fnl$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0, 6)
animation_chars = [
"😁🏿",
"😁🏾",
"😁🏽",
"😁🏼",
"😁",
"**Good to See you friend....**"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 6])
@borg.on(admin_cmd(pattern=f"monkey$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0, 6)
animation_chars = [
"🐵",
"🙉",
"🙈",
"🙊",
"🖕🐵🖕",
"**Good to See you friend....**"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 6])
@borg.on(admin_cmd(pattern=f"herber$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 2
animation_ttl = range(0, 11)
animation_chars = [
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 10%\n\n ●○○○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 5.9%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 8.13GB\n **🔹used:** 33.77GB\n **🔹total:** 60.0GB\n \n ●●●●●●●○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 158.98GB\n **🔹recv:** 146.27GB\n **🔹sent_packets:** 84518799\n **🔹recv_packets:** 159720314\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 30%\n\n ●●●○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 20.4%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 7.18GB\n **🔹used:** 28.26GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 146.27GB\n **🔹recv:** 124.33GB\n **🔹sent_packets:** 54635686\n **🔹recv_packets:** 143565654\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 60%\n\n ●●●●●●○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 60.9%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 6.52GB\n **🔹used:** 35.78GB\n **🔹total:** 60.0GB\n \n ●●●○○○○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 124.33GB\n **🔹recv:** 162.48GB\n **🔹sent_packets:** 25655655\n **🔹recv_packets:** 165289456\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 100%\n\n ●●●●●●●●●●\n\n **🔹cpu core**\n\n **🔹core_usage:** 100.0%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 9.81GB\n **🔹used:** 30.11GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 162.48GB\n **🔹recv:** 175.75GB\n **🔹sent_packets:** 56565435\n **🔹recv_packets:** 135345655\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 70%\n\n ●●●●●●●○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 80.4%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 5.76GB\n **🔹used:** 29.35GB\n **🔹total:** 60.0GB\n \n ●●●●●●●○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 175.75GB\n **🔹recv:** 118.55GB\n **🔹sent_packets:** 36547698\n **🔹recv_packets:** 185466554\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 60%\n\n ●●●●●●○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 62.9%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 8.23GB\n **🔹used:** 33.32GB\n **🔹total:** 60.0GB\n \n ●●●●●●○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 118.55GB\n **🔹recv:** 168.65GB\n **🔹sent_packets:** 24786554\n **🔹recv_packets:** 156745865\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 30%\n\n ●●●○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 30.6%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 9.75GB\n **🔹used:** 36.54GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 168.65GB\n **🔹recv:** 128.35GB\n **🔹sent_packets:** 56565435\n **🔹recv_packets:** 1475823589\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 10%\n\n ●○○○○○○○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 10.2%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 10.20GB\n **🔹used:** 25.40GB\n **🔹total:** 60.0GB\n \n ●●●●●●○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 128.35GB\n **🔹recv:** 108.31GB\n **🔹sent_packets:** 54635686\n **🔹recv_packets:** 157865426\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 100%\n\n ●●●●●●●●●●\n\n **🔹cpu core**\n\n **🔹core_usage:** 100.0%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 5.25GB\n **🔹used:** 31.14GB\n **🔹total:** 60.0GB\n \n ●●●●●●●●●●\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 108.31GB\n **🔹recv:** 167.17GB\n **🔹sent_packets:** 84518799\n **🔹recv_packets:** 124575356\n\n\n**===================**\n",
"**===================**\n **Server Details** \n**===================**\n\n\n**=>>> CPU <<<=**\n\n **🔹current_freq:** 2500.09MHz\n **🔹total_usage:** 70%\n\n ●●●●●●●○○○\n\n **🔹cpu core**\n\n **🔹core_usage:** 76.2%\n **🔹current_freq:** 2500.09MHz\n |██████████▉ |\n \n**=>>> RAM <<<=**\n\n **🔹free:** 8.01GB\n **🔹used:** 33.27GB\n **🔹total:** 60.0GB\n \n ●●●○○○○○○○\n\n\n**=>>> DISK <<<=**\n\n **🔹free:** 224.12GB\n **🔹used:** 131.84GB\n **🔹total:** 375.02GB\n **🔹usage:** 37.0%\n\n |████▍ |\n\n\n**=>>> NETWORK <<<=**\n\n **🔹sent:** 167.17GB\n **🔹recv:** 158.98GB\n **🔹sent_packets:** 36547698\n **🔹recv_packets:** 165455856\n\n\n**===================**\n",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])  # 10 frames here; a hard-coded '% 11' would raise IndexError on the last iteration
@borg.on(admin_cmd(pattern=f"hand$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 14)
animation_chars = [
"👈",
"👉",
"☝️",
"👆",
"🖕",
"👇",
"✌️",
"🤞",
"🖖",
"🤘",
"🤙",
"🖐️",
"👌"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])  # 13 frames here; a hard-coded '% 14' would raise IndexError on the last iteration
@borg.on(admin_cmd(pattern=f"gsg$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 13)
animation_chars = [
"🔟",
"9️⃣",
"8️⃣",
"7️⃣",
"6️⃣",
"5️⃣",
"4️⃣",
"3️⃣",
"2️⃣",
"1️⃣",
"0️⃣",
"🆘"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        await event.edit(animation_chars[i % len(animation_chars)])  # 12 frames here; a hard-coded '% 13' would raise IndexError on the last iteration
@borg.on(admin_cmd(pattern=r"theart$", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 54)
animation_chars = [
"❤️",
"🧡",
"💛",
"💚",
"💙",
"💜",
"🖤",
"💘",
"💝",
"❤️",
"🧡",
"💛",
"💚",
"💙",
"💜",
"🖤",
"💘",
"💝"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 18])
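# ---------------------------------------------------------------------------
# Added illustration: every command above repeats the same pattern -- edit
# the triggering message in a loop, cycling through a list of frames with a
# modulo index. A generic helper capturing that pattern might look like the
# sketch below (not part of the original plugin).
async def _animate(event, frames, interval=0.3, cycles=1):
    """Edit `event`'s message through `frames`, cycling `cycles` times."""
    for i in range(len(frames) * cycles):
        await asyncio.sleep(interval)
        await event.edit(frames[i % len(frames)])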
| 55.03876 | 781 | 0.406338 |
c74f1b750e0ef58e421a7c8b5c1ac411bce8f83e | 2,584 | py | Python | vk/bot_framework/dispatcher/handler.py | fossabot/vk.py | 94d5c719eb8da6d778d2be208038c447971d5cff | ["MIT"] | null | null | null | vk/bot_framework/dispatcher/handler.py | fossabot/vk.py | 94d5c719eb8da6d778d2be208038c447971d5cff | ["MIT"] | null | null | null | vk/bot_framework/dispatcher/handler.py | fossabot/vk.py | 94d5c719eb8da6d778d2be208038c447971d5cff | ["MIT"] | null | null | null |
import abc
import asyncio
import logging
import typing
from vk.bot_framework.dispatcher import data_
from vk.bot_framework.dispatcher.rule import BaseRule
from vk.types.events.community.events_list import Event
logger = logging.getLogger(__name__)
class BaseHandler(abc.ABC):
@property
def event_type(self) -> Event:
raise NotImplementedError()
@property
def handler(self) -> typing.Callable:
raise NotImplementedError()
@property
def rules(self) -> typing.List[BaseRule]:
raise NotImplementedError()
@property
def meta(self) -> dict:
raise NotImplementedError()
@meta.setter
def meta(self, value):
raise NotImplementedError()
@abc.abstractmethod
async def execute_handler(self, *args):
"""
Execute handler (after handler rules.)
args - (event, data)
"""
class SkipHandler(Exception):
"""
    Raise this when you want to skip handlers.
"""
pass
class Handler:
def __init__(
self, event_type: Event, handler: typing.Callable, rules: typing.List[BaseRule]
):
"""
:param event_type: type of event which this handler accepted
:param handler: coroutine
:param rules: list of rules which be executed
"""
self.event_type: Event = event_type
self.handler: typing.Callable = handler
self.rules: typing.List[BaseRule] = rules
self._meta: typing.Dict[str, typing.Any] = {}
@property
def meta(self):
return self._meta
@meta.setter
def meta(self, value: typing.Dict[str, typing.Any]):
self._meta = value
async def execute_handler(self, *args):
"""
Execute rules and handler
:param args:
:return:
"""
# args - (event, data)
if self.rules:
_execute = False
for rule in self.rules:
if not asyncio.iscoroutinefunction(rule) and not isinstance(
rule, BaseRule
):
result = rule(*args)
else:
result = await rule(*args)
if not result:
_execute = False
break
if isinstance(result, dict):
args[1].update(result)
data_.set(args[1])
_execute = True
if _execute:
await self.handler(*args)
return True
else:
await self.handler(*args)
return True
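# ---------------------------------------------------------------------------
# Added illustration (hypothetical, not part of this module): Handler accepts
# rules that are BaseRule instances or plain coroutines, each called with
# (event, data). A falsy return rejects the event, a truthy return lets the
# next rule run, and a dict return is merged into the shared data before the
# handler executes. A plain coroutine is therefore enough to act as a rule:
async def has_text_rule(event, data):
    text = getattr(getattr(event, "object", None), "text", None)
    if not text:
        return False
    # Returning a dict makes `text` available to the handler through `data`.
    return {"text": text}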
| 24.846154 | 87 | 0.566176 |
daae9923c4299e7e4b25b6125c3f59c5e1730ed8 | 7,671 | py | Python | T4_trd_env.py | EY4L/Bitcoin-trading-bot | d57325e165ddb2731089cf0a600114617bff0d82 | ["MIT"] | null | null | null | T4_trd_env.py | EY4L/Bitcoin-trading-bot | d57325e165ddb2731089cf0a600114617bff0d82 | ["MIT"] | null | null | null | T4_trd_env.py | EY4L/Bitcoin-trading-bot | d57325e165ddb2731089cf0a600114617bff0d82 | ["MIT"] | 1 | 2021-11-21T15:47:44.000Z | 2021-11-21T15:47:44.000Z |
import gym
from gym import spaces
import pandas as pd
import numpy as np
import random
class trading_env(gym.Env):
"""Single Stock Trading Environment"""
def __init__(self,df, init_capital=10000):
#instance attributes
self.df = df
self.initial_capital = init_capital
self.current_step = None
#Porfolio Information
self.no_stocks_bought = None
self.no_stocks_sold = None
self.portfolio_value = None
self.current_stocks_held = None
self.current_capital = None
self.avg_cost = None
self.buy_cost = None
self.returns = None
self.max_steps = None
self.previous_portfolio_value = None
#Values for normalising data
self.max_stock_price = max(self.df["Close"])
self.max_volume = max(self.df["Volume_(BTC)"])
self.max_capital = 1000000
self.max_no_shares = 10000
def observation(self):
#-6 the predefined lookback window
# env_observations = np.array([self.df.loc[self.current_step-5:self.current_step,"Close"].values/self.max_stock_price,
# self.df.loc[self.current_step-5:self.current_step,"Volume_(BTC)"].values/self.max_volume,
# self.df.loc[self.current_step-5:self.current_step,"MACD_status"].values,
# self.df.loc[self.current_step-5:self.current_step,"RSI_status"].values,
# self.df.loc[self.current_step-5:self.current_step,"EMA_status"].values,
# self.df.loc[self.current_step-5:self.current_step,"3D_return_norm"].values]
# ) #Not required for Q-learning, only using 2 variables, combined_indicators & return_norm
# obs = np.append(env_observations,[[
# self.current_capital/self.max_capital,
# self.portfolio_value/self.max_capital,
# self.returns/self.initial_capital, # not sure how to normalise returns since it can be a negative value
# self.no_stocks_bought/self.max_no_shares,
# self.no_stocks_sold/self.max_no_shares,
# self.avg_cost/self.max_stock_price
# ]],axis = 0)
obs = np.array([self.df.loc[self.current_step,"3D_return_norm"], self.df.loc[self.current_step,"MACD_status"],self.df.loc[self.current_step,"RSI_status"],self.df.loc[self.current_step,"EMA_status"],self.df.loc[self.current_step,"Pred_status"]])
return obs
def step(self,a):
self.action(a)
self.current_step += 1
if self.current_step > len(self.df.loc[:,"Open"].values):
            self.current_step = 0  # Sanity check: wrap back to the start if the step index runs past the end of the DataFrame
delay = self.current_step/self.max_steps
reward = (self.portfolio_value - self.previous_portfolio_value)/self.previous_portfolio_value
if self.current_step == len(self.df):
self.done = True
elif self.portfolio_value == 0:
self.done = True
obs = self.observation()
return obs,float(reward), self.done
def action(self,a):
self.amount = 0
current_price = random.uniform(self.df.loc[self.current_step,"Open"],self.df.loc[self.current_step,"Close"])
#Buy at the low and sell high
if self.df.loc[self.current_step,"3D_return"] < -0.19:
self.amount = random.uniform(0.3,0.5)
elif (self.df.loc[self.current_step,"3D_return"] > -0.19) & (self.df.loc[self.current_step,"3D_return"]<-0.02):
self.amount = random.uniform(0.1,0.3)
elif self.df.loc[self.current_step,"3D_return"] > 0.3:
self.amount = random.uniform(0.3,0.5)
elif (self.df.loc[self.current_step,"3D_return"] >0.1) & (self.df.loc[self.current_step,"3D_return"]<0.3):
self.amount = random.uniform(0.1,0.3)
action_taken = a
if action_taken == 2: # Buy
total_possible = self.current_capital/current_price
amount_stocks_bought = total_possible
current_cost = amount_stocks_bought * current_price
self.buy_cost += current_cost
self.no_stocks_bought += amount_stocks_bought
self.current_stocks_held += amount_stocks_bought
self.avg_cost = float(self.buy_cost) / float(self.current_stocks_held)
            self.current_capital -= current_cost  # attempts to incentivise buying behaviour at prices lower than the average cost
self.previous_portfolio_value = self.portfolio_value
self.portfolio_value = self.current_capital + (self.current_stocks_held*current_price)
elif action_taken <= 0: #Sell
shares_sell = self.current_stocks_held
profit = shares_sell * current_price
self.no_stocks_sold += shares_sell
self.current_stocks_held -= shares_sell
self.current_capital += profit
self.returns = profit - (shares_sell * self.avg_cost)
self.buy_cost -= shares_sell * self.avg_cost
self.previous_portfolio_value = self.portfolio_value
self.portfolio_value = self.current_capital + (self.current_stocks_held*current_price)
elif action_taken == 1:
self.previous_portfolio_value = self.portfolio_value
self.portfolio_value -= self.portfolio_value *0.1 #holding should only be considered beneficial if current price of all assets > average price of assets, besides that selling is better
if self.current_capital > self.max_capital:
self.max_capital = self.current_capital
if self.current_stocks_held <= 0:
            self.avg_cost = 0  # reset the average cost once no stocks are held ('==' here was a no-op comparison)
def reset(self):
self.no_stocks_bought = 0.00000001 #to avoid double scalar problems
self.no_stocks_sold = 0.0000001 #to avoid double scalar problems
self.current_stocks_held = 0.000001
self.portfolio_value = self.initial_capital
self.current_capital = self.initial_capital
self.avg_cost = 0
self.returns = 0
self.max_steps = len(self.df)
self.current_step = 0
self.buy_cost = 0
self.previous_portfolio_value = 0
self.done = False
return self.observation()
def render(self):
current_price = random.uniform(self.df.loc[self.current_step, "Open"],self.df.loc[self.current_step,"Close"])
self.portfolio_value = self.current_capital + (self.current_stocks_held*current_price)
return_perc = (self.portfolio_value-self.initial_capital)/self.initial_capital * 100
print(f"Current Porfolio Value:{self.portfolio_value}; Available Capital: {self.current_capital}; Current Stocks Held: {self.current_stocks_held}")
print(f"No. Stocks Bought:{self.no_stocks_bought}; No. Stocks Sold:{self.no_stocks_sold}; Average Cost:{self.avg_cost} ")
print(f"Return:{return_perc}%; {self.portfolio_value-self.initial_capital}")
print(f"Termination date: {self.df.loc[self.current_step,'Timestamp']}")
def reward_output(self):
return_value = self.portfolio_value-self.initial_capital
return_perc = (self.portfolio_value/self.initial_capital) * 100
return return_perc, return_value, self.no_stocks_bought,self.no_stocks_sold
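# ---------------------------------------------------------------------------
# Added usage sketch (synthetic data, not part of the original module): the
# environment expects a DataFrame indexed 0..N-1 with the OHLC, volume,
# indicator-status and return columns referenced above. Actions are
# 0 = sell, 1 = hold, 2 = buy.
if __name__ == "__main__":
    _n = 10
    _df = pd.DataFrame({
        "Timestamp": pd.date_range("2021-01-01", periods=_n, freq="D"),
        "Open": np.linspace(100.0, 110.0, _n),
        "Close": np.linspace(101.0, 111.0, _n),
        "Volume_(BTC)": np.ones(_n),
        "MACD_status": np.zeros(_n),
        "RSI_status": np.zeros(_n),
        "EMA_status": np.zeros(_n),
        "Pred_status": np.zeros(_n),
        "3D_return": np.zeros(_n),
        "3D_return_norm": np.zeros(_n),
    })
    env = trading_env(_df, init_capital=10000)
    obs = env.reset()
    for _action in (2, 1, 0):  # buy, then hold, then sell
        obs, reward, done = env.step(_action)
        if done:
            break
    env.render()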
| 47.645963 | 252 | 0.639421 |
2c4b93c40bbcb708ed1954792251763307ebe751 | 786 | py | Python | rest_service/client_rest.py | mgzeke0/movie_classifier | df24e29e8a3c3f04e77a13a03d301f4daa6b8385 | ["Apache-2.0"] | null | null | null | rest_service/client_rest.py | mgzeke0/movie_classifier | df24e29e8a3c3f04e77a13a03d301f4daa6b8385 | ["Apache-2.0"] | 5 | 2020-09-25T21:16:41.000Z | 2022-02-10T00:38:07.000Z | rest_service/client_rest.py | mgzeke0/movie_classifier | df24e29e8a3c3f04e77a13a03d301f4daa6b8385 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import json
import traceback
import requests
from config import PORT
def test():
"""
Utility function to debug FastAPI service
:return:
"""
url = f'http://127.0.0.1:{PORT}/predict_genre/'
while True:
title = input('Insert movie title: ')
overview = input('Insert movie description: ')
if title in {'q', 'quit'} or overview in {'q', 'quit'}:
break
try:
input_obj = {
'title': [title],
'overview': [overview],
}
res = requests.post(url, json=input_obj)
result = json.loads(res.content)
print(result)
except Exception:
traceback.print_exc()
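# ---------------------------------------------------------------------------
# Added illustration (not part of the original client): the same endpoint can
# also be exercised non-interactively, e.g. from CI. The title/overview
# values in the usage comment below are made up.
def predict_once(title, overview):
    url = f'http://127.0.0.1:{PORT}/predict_genre/'
    res = requests.post(url, json={'title': [title], 'overview': [overview]})
    res.raise_for_status()
    return json.loads(res.content)
# Example: predict_once('Alien', 'A mining crew encounters a deadly lifeform.')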
if __name__ == '__main__':
test()
| 22.457143 | 63 | 0.52799 |
5e64adce2d9f7dffa22395236fbf1f9e486cc783 | 3,177 | py | Python | airflow/providers/datadog/sensors/datadog.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | ["Apache-2.0"] | null | null | null | airflow/providers/datadog/sensors/datadog.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | ["Apache-2.0"] | 1 | 2020-05-24T15:53:39.000Z | 2020-05-24T15:53:39.000Z | airflow/providers/datadog/sensors/datadog.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datadog import api
from airflow.exceptions import AirflowException
from airflow.providers.datadog.hooks.datadog import DatadogHook
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class DatadogSensor(BaseSensorOperator):
"""
A sensor to listen, with a filter, to datadog event streams and determine
if some event was emitted.
Depends on the datadog API, which has to be deployed on the same server where
Airflow runs.
:param datadog_conn_id: The connection to datadog, containing metadata for api keys.
    :type datadog_conn_id: str
"""
ui_color = '#66c3dd'
@apply_defaults
def __init__(
self,
datadog_conn_id='datadog_default',
from_seconds_ago=3600,
up_to_seconds_from_now=0,
priority=None,
sources=None,
tags=None,
response_check=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.datadog_conn_id = datadog_conn_id
self.from_seconds_ago = from_seconds_ago
self.up_to_seconds_from_now = up_to_seconds_from_now
self.priority = priority
self.sources = sources
self.tags = tags
self.response_check = response_check
def poke(self, context):
# This instantiates the hook, but doesn't need it further,
# because the API authenticates globally (unfortunately),
# but for airflow this shouldn't matter too much, because each
# task instance runs in its own process anyway.
DatadogHook(datadog_conn_id=self.datadog_conn_id)
response = api.Event.query(
start=self.from_seconds_ago,
end=self.up_to_seconds_from_now,
priority=self.priority,
sources=self.sources,
tags=self.tags)
if isinstance(response, dict) and response.get('status', 'ok') != 'ok':
self.log.error("Unexpected Datadog result: %s", response)
raise AirflowException("Datadog returned unexpected result")
if self.response_check:
# run content check on response
return self.response_check(response)
# If no check was inserted, assume any event that matched yields true.
return len(response) > 0
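# ---------------------------------------------------------------------------
# Added usage sketch (not part of this module): how the sensor is typically
# wired into a DAG. The DAG id, schedule and tag filter are made-up examples;
# `response_check` receives the raw api.Event.query() response.
def _example_dag():
    from airflow import DAG
    from airflow.utils.dates import days_ago
    with DAG("datadog_event_example", start_date=days_ago(1),
             schedule_interval=None) as dag:
        DatadogSensor(
            task_id="wait_for_event",
            datadog_conn_id="datadog_default",
            from_seconds_ago=3600,
            up_to_seconds_from_now=0,
            tags=["team:data"],  # hypothetical tag filter
            response_check=lambda response: len(response) > 0,
        )
    return dag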
| 37.376471 | 88 | 0.687441 |
dcf03fe5aa03bde678f2ed82914789262f44883a | 9,009 | py | Python | nova/tests/unit/cells/test_cells_weights.py | lixiaoy1/nova | 357b8b38e88300948bb2e07d1bbaabd1e9d7b60e | ["Apache-2.0"] | 2 | 2021-10-11T04:56:25.000Z | 2022-02-16T08:49:29.000Z | nova/tests/unit/cells/test_cells_weights.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | ["Apache-2.0"] | 132 | 2017-03-27T11:31:52.000Z | 2022-03-30T08:45:02.000Z | nova/tests/unit/cells/test_cells_weights.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | ["Apache-2.0"] | 8 | 2017-03-27T07:50:38.000Z | 2020-02-14T16:55:56.000Z |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for testing the cells weight algorithms.
Cells with higher weights should be given priority for new builds.
"""
import datetime
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
from nova.cells import state
from nova.cells import weights
from nova import test
class FakeCellState(state.CellState):
def __init__(self, cell_name):
super(FakeCellState, self).__init__(cell_name)
self.capacities['ram_free'] = {'total_mb': 0,
'units_by_mb': {}}
self.db_info = {}
def _update_ram_free(self, *args):
ram_free = self.capacities['ram_free']
for ram_size, units in args:
ram_free['total_mb'] += units * ram_size
ram_free['units_by_mb'][str(ram_size)] = units
def _get_fake_cells():
cell1 = FakeCellState('cell1')
cell1._update_ram_free((512, 1), (1024, 4), (2048, 3))
cell1.db_info['weight_offset'] = -200.0
cell2 = FakeCellState('cell2')
cell2._update_ram_free((512, 2), (1024, 3), (2048, 4))
cell2.db_info['weight_offset'] = -200.1
cell3 = FakeCellState('cell3')
cell3._update_ram_free((512, 3), (1024, 2), (2048, 1))
cell3.db_info['weight_offset'] = 400.0
cell4 = FakeCellState('cell4')
cell4._update_ram_free((512, 4), (1024, 1), (2048, 2))
cell4.db_info['weight_offset'] = 300.0
return [cell1, cell2, cell3, cell4]
class CellsWeightsTestCase(test.NoDBTestCase):
"""Makes sure the proper weighers are in the directory."""
def test_all_weighers(self):
weighers = weights.all_weighers()
# Check at least a couple that we expect are there
self.assertGreaterEqual(len(weighers), 2)
class_names = [cls.__name__ for cls in weighers]
self.assertIn('WeightOffsetWeigher', class_names)
self.assertIn('RamByInstanceTypeWeigher', class_names)
class _WeigherTestClass(test.NoDBTestCase):
"""Base class for testing individual weigher plugins."""
weigher_cls_name = None
def setUp(self):
super(_WeigherTestClass, self).setUp()
self.weight_handler = weights.CellWeightHandler()
weigher_classes = self.weight_handler.get_matching_classes(
[self.weigher_cls_name])
self.weighers = [cls() for cls in weigher_classes]
def _get_weighed_cells(self, cells, weight_properties):
return self.weight_handler.get_weighed_objects(self.weighers,
cells, weight_properties)
class RAMByInstanceTypeWeigherTestClass(_WeigherTestClass):
weigher_cls_name = ('nova.cells.weights.ram_by_instance_type.'
'RamByInstanceTypeWeigher')
def test_default_spreading(self):
"""Test that cells with more ram available return a higher weight."""
cells = _get_fake_cells()
# Simulate building a new 512MB instance.
instance_type = {'memory_mb': 512}
weight_properties = {'request_spec': {'instance_type': instance_type}}
weighed_cells = self._get_weighed_cells(cells, weight_properties)
self.assertEqual(4, len(weighed_cells))
resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
expected_cells = [cells[3], cells[2], cells[1], cells[0]]
self.assertEqual(expected_cells, resulting_cells)
# Simulate building a new 1024MB instance.
instance_type = {'memory_mb': 1024}
weight_properties = {'request_spec': {'instance_type': instance_type}}
weighed_cells = self._get_weighed_cells(cells, weight_properties)
self.assertEqual(4, len(weighed_cells))
resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
expected_cells = [cells[0], cells[1], cells[2], cells[3]]
self.assertEqual(expected_cells, resulting_cells)
# Simulate building a new 2048MB instance.
instance_type = {'memory_mb': 2048}
weight_properties = {'request_spec': {'instance_type': instance_type}}
weighed_cells = self._get_weighed_cells(cells, weight_properties)
self.assertEqual(4, len(weighed_cells))
resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
expected_cells = [cells[1], cells[0], cells[3], cells[2]]
self.assertEqual(expected_cells, resulting_cells)
def test_negative_multiplier(self):
"""Test that cells with less ram available return a higher weight."""
self.flags(ram_weight_multiplier=-1.0, group='cells')
cells = _get_fake_cells()
# Simulate building a new 512MB instance.
instance_type = {'memory_mb': 512}
weight_properties = {'request_spec': {'instance_type': instance_type}}
weighed_cells = self._get_weighed_cells(cells, weight_properties)
self.assertEqual(4, len(weighed_cells))
resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
expected_cells = [cells[0], cells[1], cells[2], cells[3]]
self.assertEqual(expected_cells, resulting_cells)
# Simulate building a new 1024MB instance.
instance_type = {'memory_mb': 1024}
weight_properties = {'request_spec': {'instance_type': instance_type}}
weighed_cells = self._get_weighed_cells(cells, weight_properties)
self.assertEqual(4, len(weighed_cells))
resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
expected_cells = [cells[3], cells[2], cells[1], cells[0]]
self.assertEqual(expected_cells, resulting_cells)
# Simulate building a new 2048MB instance.
instance_type = {'memory_mb': 2048}
weight_properties = {'request_spec': {'instance_type': instance_type}}
weighed_cells = self._get_weighed_cells(cells, weight_properties)
self.assertEqual(4, len(weighed_cells))
resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
expected_cells = [cells[2], cells[3], cells[0], cells[1]]
self.assertEqual(expected_cells, resulting_cells)
class WeightOffsetWeigherTestClass(_WeigherTestClass):
"""Test the RAMWeigher class."""
weigher_cls_name = 'nova.cells.weights.weight_offset.WeightOffsetWeigher'
def test_weight_offset(self):
"""Test that cells with higher weight_offsets return higher
weights.
"""
cells = _get_fake_cells()
weighed_cells = self._get_weighed_cells(cells, {})
self.assertEqual(4, len(weighed_cells))
expected_cells = [cells[2], cells[3], cells[0], cells[1]]
resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
self.assertEqual(expected_cells, resulting_cells)
class MuteWeigherTestClass(_WeigherTestClass):
weigher_cls_name = 'nova.cells.weights.mute_child.MuteChildWeigher'
def setUp(self):
super(MuteWeigherTestClass, self).setUp()
self.flags(mute_weight_multiplier=-10.0, mute_child_interval=100,
group='cells')
self.now = timeutils.utcnow()
self.useFixture(utils_fixture.TimeFixture(self.now))
self.cells = _get_fake_cells()
for cell in self.cells:
cell.last_seen = self.now
def test_non_mute(self):
weight_properties = {}
weighed_cells = self._get_weighed_cells(self.cells, weight_properties)
self.assertEqual(4, len(weighed_cells))
for weighed_cell in weighed_cells:
self.assertEqual(0, weighed_cell.weight)
def test_mutes(self):
# make 2 of them mute:
self.cells[0].last_seen = (self.cells[0].last_seen -
datetime.timedelta(seconds=200))
self.cells[1].last_seen = (self.cells[1].last_seen -
datetime.timedelta(seconds=200))
weight_properties = {}
weighed_cells = self._get_weighed_cells(self.cells, weight_properties)
self.assertEqual(4, len(weighed_cells))
for i in range(2):
weighed_cell = weighed_cells.pop(0)
self.assertEqual(0, weighed_cell.weight)
self.assertIn(weighed_cell.obj.name, ['cell3', 'cell4'])
for i in range(2):
weighed_cell = weighed_cells.pop(0)
self.assertEqual(-10.0, weighed_cell.weight)
self.assertIn(weighed_cell.obj.name, ['cell1', 'cell2'])
| 41.516129
| 78
| 0.676768
|
ce1bf201a160fdb55900343f242bdac3cbfd2131
| 1,019
|
py
|
Python
|
sample_app/migrations/0001_initial.py
|
Draft2Digital/react-in-django
|
09906fd69b5ce574d3cb798e4ea1f6a3334f9dfb
|
[
"MIT"
] | null | null | null |
sample_app/migrations/0001_initial.py
|
Draft2Digital/react-in-django
|
09906fd69b5ce574d3cb798e4ea1f6a3334f9dfb
|
[
"MIT"
] | null | null | null |
sample_app/migrations/0001_initial.py
|
Draft2Digital/react-in-django
|
09906fd69b5ce574d3cb798e4ea1f6a3334f9dfb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_name', models.TextField()),
('last_name', models.TextField()),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.TextField()),
('copyright', models.DateField()),
('description', models.TextField()),
('author', models.ForeignKey(to='sample_app.Author')),
],
),
]
| 30.878788
| 114
| 0.533857
|
04f83863146f3e50ce4964fcf0e8e9dd5376764b
| 11,882
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/storage/_client_factory.py
|
chef-davin/azure-cli
|
2ebd33d5f69a27d07404cc48cf475d5fbdda6378
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/storage/_client_factory.py
|
chef-davin/azure-cli
|
2ebd33d5f69a27d07404cc48cf475d5fbdda6378
|
[
"MIT"
] | 1
|
2021-06-02T04:24:22.000Z
|
2021-06-02T04:24:22.000Z
|
src/azure-cli/azure/cli/command_modules/storage/_client_factory.py
|
chef-davin/azure-cli
|
2ebd33d5f69a27d07404cc48cf475d5fbdda6378
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_data_service_client
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.command_modules.storage.sdkutil import get_table_data_type
MISSING_CREDENTIALS_ERROR_MESSAGE = """
Missing credentials to access storage service. The following variations are accepted:
(1) account name and key (--account-name and --account-key options or
set AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY environment variables)
(2) account name and SAS token (--sas-token option used with either the --account-name
option or AZURE_STORAGE_ACCOUNT environment variable)
(3) account name (--account-name option or AZURE_STORAGE_ACCOUNT environment variable;
this will make calls to query for a storage account key using login credentials)
(4) connection string (--connection-string option or
set AZURE_STORAGE_CONNECTION_STRING environment variable); some shells will require
quoting to preserve literal character interpretation.
"""
def get_storage_data_service_client(cli_ctx, service, name=None, key=None, connection_string=None, sas_token=None,
socket_timeout=None, token_credential=None):
return get_data_service_client(cli_ctx, service, name, key, connection_string, sas_token,
socket_timeout=socket_timeout,
token_credential=token_credential,
endpoint_suffix=cli_ctx.cloud.suffixes.storage_endpoint)
def generic_data_service_factory(cli_ctx, service, name=None, key=None, connection_string=None, sas_token=None,
socket_timeout=None, token_credential=None):
try:
return get_storage_data_service_client(cli_ctx, service, name, key, connection_string, sas_token,
socket_timeout, token_credential)
except ValueError as val_exception:
_ERROR_STORAGE_MISSING_INFO = get_sdk(cli_ctx, ResourceType.DATA_STORAGE,
'common._error#_ERROR_STORAGE_MISSING_INFO')
message = str(val_exception)
if message == _ERROR_STORAGE_MISSING_INFO:
message = MISSING_CREDENTIALS_ERROR_MESSAGE
from knack.util import CLIError
raise CLIError(message)
def storage_client_factory(cli_ctx, **_):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
def file_data_service_factory(cli_ctx, kwargs):
t_file_svc = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'file#FileService')
return generic_data_service_factory(cli_ctx, t_file_svc, kwargs.pop('account_name', None),
kwargs.pop('account_key', None),
connection_string=kwargs.pop('connection_string', None),
sas_token=kwargs.pop('sas_token', None))
def page_blob_service_factory(cli_ctx, kwargs):
t_page_blob_service = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'blob.pageblobservice#PageBlobService')
return generic_data_service_factory(cli_ctx, t_page_blob_service, kwargs.pop('account_name', None),
kwargs.pop('account_key', None),
connection_string=kwargs.pop('connection_string', None),
sas_token=kwargs.pop('sas_token', None),
token_credential=kwargs.pop('token_credential', None))
def blob_data_service_factory(cli_ctx, kwargs):
if 'encryption_scope' in kwargs and kwargs['encryption_scope']:
return cf_blob_client(cli_ctx, kwargs)
from azure.cli.command_modules.storage.sdkutil import get_blob_service_by_type
blob_type = kwargs.get('blob_type')
blob_service = get_blob_service_by_type(cli_ctx, blob_type) or get_blob_service_by_type(cli_ctx, 'block')
return generic_data_service_factory(cli_ctx, blob_service, kwargs.pop('account_name', None),
kwargs.pop('account_key', None),
connection_string=kwargs.pop('connection_string', None),
sas_token=kwargs.pop('sas_token', None),
socket_timeout=kwargs.pop('socket_timeout', None),
token_credential=kwargs.pop('token_credential', None))
def table_data_service_factory(cli_ctx, kwargs):
return generic_data_service_factory(cli_ctx,
get_table_data_type(cli_ctx, 'table', 'TableService'),
kwargs.pop('account_name', None),
kwargs.pop('account_key', None),
connection_string=kwargs.pop('connection_string', None),
sas_token=kwargs.pop('sas_token', None))
def queue_data_service_factory(cli_ctx, kwargs):
t_queue_service = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'queue#QueueService')
return generic_data_service_factory(
cli_ctx, t_queue_service,
kwargs.pop('account_name', None),
kwargs.pop('account_key', None),
connection_string=kwargs.pop('connection_string', None),
sas_token=kwargs.pop('sas_token', None),
token_credential=kwargs.pop('token_credential', None))
def cloud_storage_account_service_factory(cli_ctx, kwargs):
t_cloud_storage_account = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def multi_service_properties_factory(cli_ctx, kwargs):
"""Create multiple data services properties instance based on the services option"""
from .services_wrapper import ServiceProperties
t_base_blob_service, t_file_service, t_queue_service, = get_sdk(cli_ctx, ResourceType.DATA_STORAGE,
'blob.baseblobservice#BaseBlobService',
'file#FileService', 'queue#QueueService')
t_table_service = get_table_data_type(cli_ctx, 'table', 'TableService')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
connection_string = kwargs.pop('connection_string', None)
sas_token = kwargs.pop('sas_token', None)
services = kwargs.pop('services', [])
def get_creator(name, service_type):
return lambda: ServiceProperties(cli_ctx, name, service_type, account_name, account_key, connection_string,
sas_token)
creators = {'b': get_creator('blob', t_base_blob_service), 'f': get_creator('file', t_file_service),
'q': get_creator('queue', t_queue_service), 't': get_creator('table', t_table_service)}
return [creators[s]() for s in services]
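# Sketch of the one-letter service codes used above (the 'services' value is assumed to
# come from the command's services option; 'bq' here is a hypothetical example):
#
#   multi_service_properties_factory(cli_ctx, {'services': 'bq', 'account_name': 'myaccount'})
#   # -> [ServiceProperties(... 'blob' ...), ServiceProperties(... 'queue' ...)]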
def cf_sa(cli_ctx, _):
return storage_client_factory(cli_ctx).storage_accounts
def cf_sa_for_keys(cli_ctx, _):
from knack.log import get_logger
logger = get_logger(__name__)
logger.debug('Disable HTTP logging to avoid having storage keys in debug logs')
client = storage_client_factory(cli_ctx)
client.config.enable_http_logger = False
return client.storage_accounts
def cf_mgmt_policy(cli_ctx, _):
return storage_client_factory(cli_ctx).management_policies
def cf_blob_container_mgmt(cli_ctx, _):
return storage_client_factory(cli_ctx).blob_containers
def cf_mgmt_blob_services(cli_ctx, _):
return storage_client_factory(cli_ctx).blob_services
def cf_mgmt_file_services(cli_ctx, _):
return storage_client_factory(cli_ctx).file_services
def cf_mgmt_file_shares(cli_ctx, _):
return storage_client_factory(cli_ctx).file_shares
def cf_blob_data_gen_update(cli_ctx, kwargs):
return blob_data_service_factory(cli_ctx, kwargs.copy())
def cf_private_link(cli_ctx, _):
return storage_client_factory(cli_ctx).private_link_resources
def cf_private_endpoint(cli_ctx, _):
return storage_client_factory(cli_ctx).private_endpoint_connections
def cf_mgmt_encryption_scope(cli_ctx, _):
return storage_client_factory(cli_ctx).encryption_scopes
def get_account_url(cli_ctx, account_name, service):
from knack.util import CLIError
if account_name is None:
raise CLIError("Please provide storage account name or connection string.")
storage_endpoint = cli_ctx.cloud.suffixes.storage_endpoint
return "https://{}.{}.{}".format(account_name, service, storage_endpoint)
def cf_blob_service(cli_ctx, kwargs):
from knack.util import CLIError
t_blob_service = get_sdk(cli_ctx, ResourceType.DATA_STORAGE_BLOB,
'_blob_service_client#BlobServiceClient')
connection_string = kwargs.pop('connection_string', None)
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
token_credential = kwargs.pop('token_credential', None)
sas_token = kwargs.pop('sas_token', None)
if connection_string:
return t_blob_service.from_connection_string(conn_str=connection_string)
account_url = get_account_url(cli_ctx, account_name=account_name, service='blob')
credential = account_key or sas_token or token_credential
if account_url and credential:
return t_blob_service(account_url=account_url, credential=credential)
raise CLIError("Please provide valid connection string, or account name with account key, "
"sas token or login auth mode.")
def cf_blob_client(cli_ctx, kwargs):
return cf_blob_service(cli_ctx, kwargs).get_blob_client(container=kwargs['container_name'],
blob=kwargs['blob_name'])
def cf_adls_service(cli_ctx, kwargs):
t_adls_service = get_sdk(cli_ctx, ResourceType.DATA_STORAGE_FILEDATALAKE,
'_data_lake_service_client#DataLakeServiceClient')
connection_string = kwargs.pop('connection_string', None)
account_key = kwargs.pop('account_key', None)
token_credential = kwargs.pop('token_credential', None)
sas_token = kwargs.pop('sas_token', None)
if connection_string:
return t_adls_service.from_connection_string(connection_string=connection_string)
account_url = get_account_url(cli_ctx, account_name=kwargs.pop('account_name', None), service='dfs')
credential = account_key or sas_token or token_credential
if account_url and credential:
return t_adls_service(account_url=account_url, credential=credential)
return None
def cf_adls_file_system(cli_ctx, kwargs):
return cf_adls_service(cli_ctx, kwargs).get_file_system_client(file_system=kwargs.pop('file_system_name'))
def cf_adls_directory(cli_ctx, kwargs):
return cf_adls_file_system(cli_ctx, kwargs).get_directory_client(directory=kwargs.pop('directory_path'))
def cf_adls_file(cli_ctx, kwargs):
return cf_adls_service(cli_ctx, kwargs).get_file_client(file_system=kwargs.pop('file_system_name', None),
file_path=kwargs.pop('path', None))
| 47.338645
| 115
| 0.680693
|
1b9c1967527aa13f2b84d821cdb771ccef93c933
| 62,656
|
py
|
Python
|
src/genie/libs/parser/iosxr/show_platform.py
|
miuvlad/genieparser
|
60b1151e3c67c6b55d75e30359d0bf52825efad8
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxr/show_platform.py
|
miuvlad/genieparser
|
60b1151e3c67c6b55d75e30359d0bf52825efad8
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxr/show_platform.py
|
miuvlad/genieparser
|
60b1151e3c67c6b55d75e30359d0bf52825efad8
|
[
"Apache-2.0"
] | null | null | null |
''' show_platform.py
IOSXR parsers for the following show commands:
* 'show version'
* 'show sdr detail'
* 'show platform'
* 'show platform vm'
* 'show install active summary'
* 'show install inactive summary'
* 'show install commit summary'
* 'show inventory'
* 'admin show diag chassis'
* 'show redundancy summary'
* 'show redundancy'
* 'dir'
'''
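# Minimal usage sketch (assumes a pyATS/Genie "device" object or pre-captured CLI text;
# the variable names below are illustrative):
#
#   parser = ShowVersion(device=device)
#   parsed = parser.cli()                        # runs 'show version' on the device
#   parsed = parser.cli(output=captured_output)  # or parse already-collected output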
# Python
import re
import xmltodict
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional, Or, And,\
Default, Use
def regexp(expression):
def match(value):
if re.match(expression,value):
return value
else:
raise TypeError("Value '%s' doesnt match regex '%s'"
%(value,expression))
return match
# =========================
# Parser for 'show version'
# =========================
class ShowVersionSchema(MetaParser):
"""Schema for show version"""
schema = {'operating_system': str,
'software_version': str,
'uptime': str,
Optional('image'): str,
'device_family': str,
Optional('processor'): str,
Optional('processor_memory_bytes'): str,
Optional('chassis_detail'): str,
Optional('config_register'): str,
Optional('rp_config_register'): str,
Optional('main_mem'): str,
}
class ShowVersion(ShowVersionSchema):
"""Parser for show version"""
cli_command = 'show version'
exclude = ['seconds', 'minutes', 'hours', 'uptime']
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
show_version_dict = {}
# regex patterns
# Cisco IOS XR Software, Version 6.3.1.15I
# Cisco IOS XR Software, Version 6.1.4.10I[Default]
p1 = re.compile(r'\s*Cisco +IOS +XR +Software, +Version'
' +(?P<software_version>[A-Z0-9\.]+)(?:\[Default\])?$')
# System uptime is 1 week, 1 day, 5 hours, 47 minutes
# PE1 uptime is 3 hours, 11 minutes
p2 = re.compile(r'\s*.* +uptime +is +(?P<uptime>[a-zA-Z0-9\s\,]+)$')
# System image file is "disk0:asr9k-os-mbi-6.1.4.10I/0x100305/mbiasr9k-rsp3.vm"
p3 = re.compile(r'\s*System +image +file +is'
' +\"(?P<image>[a-zA-Z0-9\:\/\.\-]+)\"$')
# cisco IOS-XRv 9000 () processor
p4 = re.compile(r'\s*cisco +(?P<device_family>[a-zA-Z0-9\-\s]+)'
r' +\(\) +processor$')
# cisco ASR9K Series (Intel 686 F6M14S4) processor with 6291456K bytes of memory.
# cisco CRS-16/S-B (Intel 686 F6M14S4) processor with 12582912K bytes of memory.
p5 = re.compile(r'^cisco +(?P<device_family>[a-zA-Z0-9\/\-\s]+)'
r'(?:( +Series))? +\((?P<processor>[a-zA-Z0-9\s]+)\)'
r' +processor +with +(?P<processor_memory_bytes>[0-9A-Z]+)'
r' +bytes +of +memory.$')
# Configuration register on node 0/RSP0/CPU0 is 0x1922
p6 = re.compile(r'\s*Configuration +register +on +node'
' +(?P<node>[A-Z0-9\/]+) +is'
' +(?P<config_register>[x0-9]+)$')
# ASR 9006 4 Line Card Slot Chassis with V2 AC PEM
p7 = re.compile(r'\s*.*Chassis.*$')
for line in out.splitlines():
line = line.strip()
m = p1.match(line)
if m:
show_version_dict['operating_system'] = 'IOSXR'
show_version_dict['software_version'] = \
str(m.groupdict()['software_version'])
continue
m = p2.match(line)
if m:
show_version_dict['uptime'] = str(m.groupdict()['uptime'])
continue
m = p3.match(line)
if m:
show_version_dict['image'] = str(m.groupdict()['image'])
continue
m = p4.match(line)
if m:
show_version_dict['device_family'] = \
str(m.groupdict()['device_family'])
continue
m = p5.match(line)
if m:
show_version_dict['device_family'] = \
m.groupdict()['device_family']
show_version_dict['processor'] = m.groupdict()['processor']
show_version_dict['processor_memory_bytes'] = \
m.groupdict()['processor_memory_bytes']
show_version_dict['main_mem'] = line
continue
m = p6.match(line)
if m:
show_version_dict['config_register'] = \
m.groupdict()['config_register']
node = str(m.groupdict()['node'])
if re.search('CPU0', node):
show_version_dict['rp_config_register'] = \
str(m.groupdict()['config_register'])
continue
m = p7.match(line)
if m:
show_version_dict['chassis_detail'] = str(line.strip())
continue
return show_version_dict
# ============================
# Parser for 'show sdr detail'
# ============================
class ShowSdrDetailSchema(MetaParser):
"""Schema for show sdr detail"""
schema = {
'sdr_id':
{Any():
{'sdr_name': str,
Optional('dsdrsc_node'): str,
Optional('dsdrsc_partner_node'): str,
'primary_node1': str,
'primary_node2': str,
Optional('mac_address'): str,
'membership':
{Any():
{'type': str,
'node_status': str,
Optional('red_state'): str,
'partner_name': str,
},
},
},
},
}
class ShowSdrDetail(ShowSdrDetailSchema):
"""Parser for show sdr detail"""
cli_command = 'show sdr detail'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
sdr_detail = {}
for line in out.splitlines():
line = line.rstrip()
# SDR_id : 0
# SDR ID : 2
p1 = re.compile(r'\s*(SDR_id|SDR ID) *: +(?P<sdr_id>[0-9]+)$')
m = p1.match(line)
if m:
if 'sdr_id' not in sdr_detail:
sdr_detail['sdr_id'] = {}
sdr_id = int(m.groupdict()['sdr_id'])
if sdr_id not in sdr_detail['sdr_id']:
sdr_detail['sdr_id'][sdr_id] = {}
continue
# SDR_name : Owner
# SDR name : default-sdr
p2 = re.compile(r'\s*(SDR_name|SDR name) *:'
' +(?P<sdr_name>[a-zA-Z\-]+)$')
m = p2.match(line)
if m:
sdr_detail['sdr_id'][sdr_id]['sdr_name'] = \
str(m.groupdict()['sdr_name'])
continue
# dSDRsc node : 0/0/CPU0
p3 = re.compile(r'\s*dSDRsc +node *:'
' +(?P<dsdrsc_node>[a-zA-Z0-9\/]+)$')
m = p3.match(line)
if m:
sdr_detail['sdr_id'][sdr_id]['dsdrsc_node'] \
= str(m.groupdict()['dsdrsc_node'])
continue
# dSDRsc partner node : NONE
p4 = re.compile(r'\s*dSDRsc +partner +node *:'
' +(?P<dsdrsc_partner_node>[a-zA-Z0-9\/]+)$')
m = p4.match(line)
if m:
sdr_detail['sdr_id'][sdr_id]['dsdrsc_partner_node'] = \
str(m.groupdict()['dsdrsc_partner_node'])
continue
# primary node1 : 0/0/CPU0
# SDR lead (Primary) : 0x1000
p5 = re.compile(r'\s*(primary +node1|SDR +lead +\(Primary\)) *:'
' +(?P<primary_node1>[a-zA-Z0-9\/]+)$')
m = p5.match(line)
if m:
sdr_detail['sdr_id'][sdr_id]['primary_node1'] = \
str(m.groupdict()['primary_node1'])
continue
# primary node2 : NONE
# SDR lead (Backup) : 0xffffffff
p6 = re.compile(r'\s*(primary +node2|SDR +lead +\(Backup\)) *:'
' +(?P<primary_node2>[a-zA-Z0-9\/]+)$')
m = p6.match(line)
if m:
sdr_detail['sdr_id'][sdr_id]['primary_node2'] = \
str(m.groupdict()['primary_node2'])
continue
# mac addr : 025e.eaff.fb57
p7 = re.compile(r'\s*mac +addr *:'
' +(?P<mac_address>[a-zA-Z0-9\.]+)$')
m = p7.match(line)
if m:
sdr_detail['sdr_id'][sdr_id]['mac_address'] = \
str(m.groupdict()['mac_address'])
continue
# RP 0/0/CPU0 IOS XR RUN Primary NONE
# RP 0/RSP0/CPU0 IOS XR RUN Primary 0/RSP1/CPU0
# RP 0/RSP0/CPU0 IOS XR RUN Primary 0/RSP1/CPU0
p8 = re.compile(r'\s*(?P<type>[a-zA-Z0-9\-]+)'
' +(?P<node_name>[a-zA-Z0-9\/]+)'
' +(?P<node_status>[IOS XR RUN|OPERATIONAL]+)'
' +(?P<red_state>[a-zA-Z\/\-]+)?'
' +(?P<partner_name>[a-zA-Z0-9\/]+)$')
m = p8.match(line)
if m:
if 'membership' not in sdr_detail['sdr_id'][sdr_id]:
sdr_detail['sdr_id'][sdr_id]['membership'] = {}
node_name = str(m.groupdict()['node_name']).strip()
if node_name not in sdr_detail['sdr_id'][sdr_id]['membership']:
sdr_detail['sdr_id'][sdr_id]['membership'][node_name] = {}
sdr_detail['sdr_id'][sdr_id]['membership'][node_name]\
['type'] = str(m.groupdict()['type']).strip()
sdr_detail['sdr_id'][sdr_id]['membership'][node_name]\
['node_status'] = \
str(m.groupdict()['node_status']).strip()
sdr_detail['sdr_id'][sdr_id]['membership'][node_name]\
['red_state'] = str(m.groupdict()['red_state']).strip()
sdr_detail['sdr_id'][sdr_id]['membership'][node_name]\
['partner_name'] = \
str(m.groupdict()['partner_name']).strip()
continue
return sdr_detail
# ==========================
# Parser for 'show platform'
# ==========================
class ShowPlatformSchema(MetaParser):
"""Schema for show platform"""
schema = {
'slot':
{Any():
{Any():
{'name': str,
'state': str,
'config_state': str,
'full_slot': str,
Optional('redundancy_state'): str,
Optional('plim'): str,
Optional('subslot'):
{Optional(Any()):
{Optional('name'): str,
Optional('state'): str,
Optional('config_state'): str,
Optional('redundancy_state'): str,
},
},
},
},
},
}
class ShowPlatform(ShowPlatformSchema):
"""Parser for show platform"""
cli_command = 'show platform'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
show_platform = {}
daughtercard_dict = {}
for line in out.splitlines():
entry_is_daughter = False
line = line.rstrip()
# 0/RSP0/CPU0 A9K-RSP440-TR(Active) IOS XR RUN PWR,NSHUT,MON
# 0/0/CPU0 RP(Active) N/A IOS XR RUN PWR,NSHUT,MON
p1 = re.compile(r'\s*(?P<node>[a-zA-Z0-9\/]+)'
' +(?P<name>[a-zA-Z0-9\-]+)'
'(?:\((?P<redundancy_state>[a-zA-Z]+)\))?'
'(?: +(?P<plim>[a-zA-Z\/]+))?'
' +(?P<state>(IOS XR RUN|OK)+)'
' +(?P<config_state>[a-zA-Z\,]+)$')
m = p1.match(line)
if m:
# Parse regexp
node = str(m.groupdict()['node']).strip()
name = str(m.groupdict()['name']).strip()
redundancy_state = str(m.groupdict()['redundancy_state']).strip()
plim = str(m.groupdict()['plim']).strip()
state = str(m.groupdict()['state']).strip()
config_state = str(m.groupdict()['config_state']).strip()
# Parse node for rack, slot, subslot details
parse_node = re.compile(r'\s*(?P<rack>[0-9]+)'
'\/(?P<slot>[0-9A-Z]+)'
'\/(?P<last_entry>[0-9A-Z]+)'
'$').match(node)
rack = str(parse_node.groupdict()['rack'])
slot = rack + '/' + str(parse_node.groupdict()['slot'])
last_entry = str(parse_node.groupdict()['last_entry'])
# Check if subslot/daughtercard
parse_subslot = re.compile(r'.*(0\/0\/[0-9]+).*').match(node)
if parse_subslot and last_entry.isdigit():
# This entry is a daughtercard/subslot
entry_is_daughter = True
subslot = last_entry
# Determine if slot is RP/LineCard/OtherCard
parse_rp = re.compile(r'.*(RSP|RP).*').match(slot)
parse_lc = re.compile(r'.*(0\/0).*').match(slot)
parse_name = re.compile(r'.*(RSP|RP).*').match(name)
if parse_rp or parse_name:
slot_type = 'rp'
elif parse_lc:
slot_type = 'lc'
else:
slot_type = 'oc'
# Set everything
if 'slot' not in show_platform:
show_platform['slot'] = {}
if slot_type not in show_platform['slot']:
show_platform['slot'][slot_type] = {}
if slot not in show_platform['slot'][slot_type]:
show_platform['slot'][slot_type][slot] = {}
show_platform['slot'][slot_type][slot]['name'] = name
show_platform['slot'][slot_type][slot]['full_slot'] = node
show_platform['slot'][slot_type][slot]['state'] = state
show_platform['slot'][slot_type][slot]['config_state'] = config_state
if redundancy_state != 'None':
show_platform['slot'][slot_type][slot]['redundancy_state'] = redundancy_state
if plim != 'None':
show_platform['slot'][slot_type][slot]['plim'] = plim
# Check for daughtercards
if daughtercard_dict and slot in daughtercard_dict:
# Then merge dictionary
show_platform['slot'][slot_type][slot]['subslot'].update(daughtercard_dict[slot])
continue
# Check for daughtercards
if entry_is_daughter:
# Verify parent exists
if slot in show_platform['slot'][slot_type]:
if 'subslot' not in show_platform['slot'][slot_type][slot]:
show_platform['slot'][slot_type][slot]['subslot'] = {}
if subslot not in show_platform['slot'][slot_type][slot]['subslot']:
show_platform['slot'][slot_type][slot]['subslot'][subslot] = {}
show_platform['slot'][slot_type][slot]['subslot'][subslot]['name'] = name
show_platform['slot'][slot_type][slot]['subslot'][subslot]['state'] = state
show_platform['slot'][slot_type][slot]['subslot'][subslot]['config_state'] = config_state
show_platform['slot'][slot_type][slot]['subslot'][subslot]['redundancy_state'] = redundancy_state
continue
else:
# Store in temp dict
if slot not in daughtercard_dict:
daughtercard_dict[slot] = {}
if 'subslot' not in daughtercard_dict[slot]:
daughtercard_dict[slot]['subslot'] = {}
if subslot not in daughtercard_dict[slot]['subslot']:
daughtercard_dict[slot]['subslot'][subslot] = {}
daughtercard_dict[slot]['subslot'][subslot]['name'] = name
daughtercard_dict[slot]['subslot'][subslot]['state'] = state
daughtercard_dict[slot]['subslot'][subslot]['config_state'] = config_state
daughtercard_dict[slot]['subslot'][subslot]['redundancy_state'] = redundancy_state
continue
return show_platform
# =============================
# Parser for 'show platform vm'
# =============================
class ShowPlatformVmSchema(MetaParser):
"""Schema for show platform vm"""
schema = {
'node':
{Any():
{'type': str,
'partner_name': str,
'sw_status': str,
'ip_address': str,
},
},
}
class ShowPlatformVm(ShowPlatformVmSchema):
"""Parser for show platform vm"""
cli_command = 'show platform vm'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
show_platform_vm = {}
for line in out.splitlines():
line = line.strip()
# 0/RP0/CPU0 RP (ACTIVE) NONE FINAL Band 192.0.0.4
# 0/0/CPU0 LC (ACTIVE) NONE FINAL Band 192.0.0.6
# 0/RSP0/CPU0 RP(ACTIVE) 0/RSP1/CPU0 FINAL Band 192.0.0.4
# 0/RSP1/CPU0 RP(STANDBY) 0/RSP0/CPU0 FINAL Band 192.168.166.4
p1 = re.compile(r'^(?P<node>[\S\/]+) +(?P<type>[(RP|LC)\s*\((ACTIVE|STANDBY)\)]+)'
' +(?P<partner_name>[NONE|(?:\S)]+) +(?P<sw_status>[a-zA-Z\s]+)'
' +(?P<ip_address>[\S]+)$')
m = p1.match(line)
if m:
if 'node' not in show_platform_vm:
show_platform_vm['node'] = {}
node = str(m.groupdict()['node']).strip()
if node not in show_platform_vm['node']:
show_platform_vm['node'][node] = {}
show_platform_vm['node'][node]['type'] = \
str(m.groupdict()['type']).strip()
show_platform_vm['node'][node]['partner_name'] = \
str(m.groupdict()['partner_name']).strip()
show_platform_vm['node'][node]['sw_status'] = \
str(m.groupdict()['sw_status']).strip()
show_platform_vm['node'][node]['ip_address'] = \
str(m.groupdict()['ip_address']).strip()
continue
return show_platform_vm
# ========================================
# Schema for 'show install active summary'
# ========================================
class ShowInstallActiveSummarySchema(MetaParser):
"""Schema for show install active summary"""
schema = {
'active_packages': Any(),
Optional('num_active_packages'): int,
Optional('sdr'): str,
}
class ShowInstallActiveSummary(ShowInstallActiveSummarySchema):
"""Parser for show install active summary"""
cli_command = 'show install active summary'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
install_active_dict = {}
previous_line_sdr = False
previous_line_active_packages = False
for line in out.splitlines():
line = line.rstrip()
p1 = re.compile(r'\s*SDRs:*$')
m = p1.match(line)
if m:
previous_line_sdr = True
continue
if previous_line_sdr:
previous_line_sdr = False
install_active_dict['sdr'] = str(line).strip()
continue
# disk0:xrvr-full-x-6.2.1.23I
# disk0:asr9k-mini-px-6.1.21.15I
# xrv9k-xr-6.2.2.14I version=6.2.2.14I [Boot image]
p2 = re.compile(r'\s*Active +Packages:'
' *(?P<num_active_packages>[0-9]+)?$')
m = p2.match(line)
if m:
previous_line_active_packages = True
if 'active_packages' not in install_active_dict:
install_active_dict['active_packages'] = []
if m.groupdict()['num_active_packages']:
install_active_dict['num_active_packages'] = \
int(m.groupdict()['num_active_packages'])
continue
if previous_line_active_packages and line is not None:
clean_line = str(line).strip()
if line and '/' not in line:
install_active_dict['active_packages'].append(clean_line)
continue
return install_active_dict
# ========================================
# Schema for 'show install inactive summary'
# ========================================
class ShowInstallInactiveSummarySchema(MetaParser):
"""Schema for show install inactive summary"""
schema = {
'inactive_packages': Any(),
Optional('num_inactive_packages'): int,
Optional('sdr'): list,
}
class ShowInstallInactiveSummary(ShowInstallInactiveSummarySchema):
"""Parser for show install inactive summary"""
cli_command = 'show install inactive summary'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
install_inactive_dict = {}
previous_line_sdr = False
previous_line_inactive_packages = False
for line in out.splitlines():
line = line.rstrip()
p1 = re.compile(r'\s*SDRs:*$')
m = p1.match(line)
if m:
previous_line_sdr = True
continue
if previous_line_sdr:
previous_line_sdr = False
install_inactive_dict.setdefault('sdr', []).append(str(line).strip())
continue
# disk0:xrvr-full-x-6.2.1.23I
# disk0:asr9k-mini-px-6.1.21.15I
# xrv9k-xr-6.2.2.14I version=6.2.2.14I [Boot image]
p2 = re.compile(r'\s*Inactive +Packages:'
' *(?P<num_inactive_packages>[0-9]+)?$')
m = p2.match(line)
if m:
previous_line_inactive_packages = True
if 'inactive_packages' not in install_inactive_dict:
install_inactive_dict['inactive_packages'] = []
if m.groupdict()['num_inactive_packages']:
install_inactive_dict['num_inactive_packages'] = \
int(m.groupdict()['num_inactive_packages'])
continue
if previous_line_inactive_packages and line is not None:
clean_line = str(line).strip()
if line and '/' not in line:
install_inactive_dict['inactive_packages'].append(clean_line)
continue
return install_inactive_dict
# ========================================
# Schema for 'show install commit summary'
# ========================================
class ShowInstallCommitSummarySchema(MetaParser):
"""Schema for show install commit summary"""
schema = {
Optional('committed_packages'): Any(),
Optional('active_packages'): Any(),
Optional('num_committed_packages'): int,
Optional('sdr'): list,
}
class ShowInstallCommitSummary(ShowInstallCommitSummarySchema):
"""Parser for show install commit summary"""
cli_command = 'show install commit summary'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
install_commit_dict = {}
previous_line_sdr = False
previous_line_committed_packages = False
previous_line_active_packages = False
for line in out.splitlines():
line = line.rstrip()
p1 = re.compile(r'\s*SDRs:*$')
m = p1.match(line)
if m:
previous_line_sdr = True
continue
if previous_line_sdr:
previous_line_sdr = False
install_commit_dict.setdefault('sdr', []).append(str(line).strip())
continue
# disk0:xrvr-full-x-6.2.1.23I
# disk0:asr9k-mini-px-6.1.21.15I
# xrv9k-xr-6.2.2.14I version=6.2.2.14I [Boot image]
p2 = re.compile(r'\s*Committed +Packages:'
' *(?P<num_committed_packages>[0-9]+)?$')
m = p2.match(line)
if m:
previous_line_committed_packages = True
if 'committed_packages' not in install_commit_dict:
install_commit_dict['committed_packages'] = []
if m.groupdict()['num_committed_packages']:
install_commit_dict['num_committed_packages'] = \
int(m.groupdict()['num_committed_packages'])
continue
if previous_line_committed_packages and line is not None:
clean_line = str(line).strip()
if line and '/' not in line:
install_commit_dict['committed_packages'].append(clean_line)
continue
# disk0:xrvr-full-x-6.2.1.23I
# disk0:asr9k-mini-px-6.1.21.15I
# xrv9k-xr-6.2.2.14I version=6.2.2.14I [Boot image]
p2 = re.compile(r'\s*Active +Packages:'
' *(?P<num_active_packages>[0-9]+)?$')
m = p2.match(line)
if m:
previous_line_active_packages = True
if 'active_packages' not in install_commit_dict:
install_commit_dict['active_packages'] = []
if m.groupdict()['num_active_packages']:
install_commit_dict['num_active_packages'] = \
int(m.groupdict()['num_active_packages'])
continue
if previous_line_active_packages and line is not None:
clean_line = str(line).strip()
if line and '/' not in line:
install_commit_dict['active_packages'].append(clean_line)
continue
return install_commit_dict
# ===========================
# Schema for 'show inventory'
# ===========================
class ShowInventorySchema(MetaParser):
"""Schema for show inventory"""
schema = {
'module_name':
{Any():
{'descr': str,
'pid': str,
'vid': str,
Optional('sn'): str,
},
},
}
class ShowInventory(ShowInventorySchema):
"""Parser for show inventory"""
cli_command = 'show inventory'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
inventory_dict = {}
# NAME: "module 0/RSP0/CPU0", DESCR: "ASR9K Route Switch Processor with 440G/slot Fabric and 6GB"
# NAME: "Rack 0", DESCR: "Cisco XRv9K Centralized Virtual Router"
# NAME: "Rack 0", DESCR: "Sherman 1RU Chassis with 24x400GE QSFP56-DD & 12x100G QSFP28"
# NAME: "0/FT4", DESCR: "Sherman Fan Module Reverse Airflow / exhaust, BLUE"
# NAME: "TenGigE0/0/0/0", DESCR: "Cisco SFP+ 10G SR Pluggable Optics Module"
p1 = re.compile(r'^NAME: +\"(?P<module_name>[\S\s]*)\",'
r' +DESCR: +\"(?P<descr>[\S\s]*)\"$')
# PID: A9K-MPA-20X1GE, VID: V02, SN: FOC1811N49J
# PID: SFP-1G-NIC-X , VID: N/A, SN: N/A
# PID: N/A, VID: N/A, SN:
p2 = re.compile(r'^PID: *(?P<pid>[\S\s]*),'
r' +VID: *(?P<vid>[\S\s]*),'
r' SN: *(?P<sn>[\S\s]*)$')
for line in out.splitlines():
line = line.strip()
if not line:
continue
# NAME: "0/FT4", DESCR: "Sherman Fan Module Reverse Airflow / exhaust, BLUE"
# NAME: "TenGigE0/0/0/0", DESCR: "Cisco SFP+ 10G SR Pluggable Optics Module"
m = p1.match(line)
if m:
if 'module_name' not in inventory_dict:
inventory_dict['module_name'] = {}
module_name = str(m.groupdict()['module_name'])
if module_name not in inventory_dict['module_name']:
inventory_dict['module_name'][module_name] = {}
inventory_dict['module_name'][module_name]['descr'] = \
str(m.groupdict()['descr']).strip()
continue
# PID: A9K-MPA-20X1GE, VID: V02, SN: FOC1811N49J
# PID: SFP-1G-NIC-X , VID: N/A, SN: N/A
m = p2.match(line)
if m:
inventory_dict['module_name'][module_name]['pid'] = \
str(m.groupdict()['pid']).strip()
inventory_dict['module_name'][module_name]['vid'] = \
str(m.groupdict()['vid']).strip()
inventory_dict['module_name'][module_name]['sn'] = \
str(m.groupdict()['sn']).strip()
continue
return inventory_dict
# ====================================
# Schema for 'admin show diag chassis'
# ====================================
class AdminShowDiagChassisSchema(MetaParser):
"""Schema for admin show diag chassis"""
schema = {
Optional('device_family'): str,
Optional('device_series'): str,
Optional('num_line_cards'): int,
Optional('chassis_feature'): str,
'rack_num': int,
Optional('sn'): str,
'pid': str,
'vid': str,
Optional('desc'): str,
'clei': str,
Optional('eci'): str,
Optional('pca'): str,
Optional('top_assy_num'): str,
Optional('main'): {
'board_type': str,
'part': str,
'dev': str,
'serial_number': str,
},
Optional('part_number'): str,
Optional('part_revision'): str,
Optional('hw_version'): str,
Optional('top_assembly_block'): {
'serial_number': str,
'part_number': str,
'part_revision': str,
'mfg_deviation': str,
'hw_version': str,
'mfg_bits': str,
}
}
class AdminShowDiagChassis(AdminShowDiagChassisSchema):
"""Parser for admin show diag chassis"""
cli_command = 'admin show diag chassis'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
admin_show_diag_dict = {}
top_assembly_flag = False
main_flag = False
for line in out.splitlines():
line = line.strip()
# Rack 0 - ASR 9006 4 Line Card Slot Chassis with V2 AC PEM
# Rack 0 - Cisco CRS Series 16 Slots Line Card Chassis
# Rack 0 - CRS 16 Slots Line Card Chassis for CRS-16/S-B
p1 = re.compile(r'Rack +(?P<rack_num>\d+) +-'
r' +(?P<device_group>[a-zA-Z0-9\s]+)'
r' +(?P<num_line_cards>\d+)'
r' +((Line +Card +Slot +Chassis +with *)|'
r'Slots +Line +Card +Chassis(?:( +for))? *)'
r'(?P<chassis_feature>[\S ]+)?$')
m = p1.match(line)
if m:
admin_show_diag_dict['rack_num'] = \
int(m.groupdict()['rack_num'])
# ASR 9006
# Cisco CRS Series
# CRS
device_group = m.group(2)
split_device_group = re.split('\s', device_group)
if len(split_device_group)>1:
admin_show_diag_dict['device_family'] = \
split_device_group[0]
device_series = ' '.join(split_device_group[1:])
else:
device_series = split_device_group[0]
admin_show_diag_dict['device_series'] = device_series
admin_show_diag_dict['num_line_cards'] = \
int(m.groupdict()['num_line_cards'])
if m.groupdict()['chassis_feature']:
admin_show_diag_dict['chassis_feature'] = \
str(m.groupdict()['chassis_feature'])
description = line[8:]
admin_show_diag_dict['desc'] = description
continue
# RACK NUM: 0
p2 = re.compile(r'RACK NUM\: *(?P<rack_num>[0-9]+)$')
m = p2.match(line)
if m:
admin_show_diag_dict['rack_num'] = \
int(m.groupdict()['rack_num'])
continue
# S/N: FOX1810G8LR
# Serial Number : FOC23158L99
p3 = re.compile(r'^(S\/N|Serial +Number)(\s+)?(\:)? '
r'+(?P<serial_number>\S+)$')
m = p3.match(line)
if m:
serial_num = str(m.groupdict()['serial_number'])
if top_assembly_flag:
top_assembly_dict['serial_number'] = serial_num
elif main_flag:
main_dict['serial_number'] = serial_num
else:
admin_show_diag_dict['sn'] = serial_num
continue
# PID: ASR-9006-AC-V2
# Product ID : NCS-5501
p4 = re.compile(r'(PID|Product ID)(\s+)?\: '
r'+(?P<pid>[a-zA-Z0-9\-]+)$')
m = p4.match(line)
if m:
admin_show_diag_dict['pid'] = \
str(m.groupdict()['pid'])
continue
# VID: V02
# VID : V01
p5 = re.compile(r'VID(\s+)?\: +(?P<vid>[a-zA-Z0-9\-]+)$')
m = p5.match(line)
if m:
admin_show_diag_dict['vid'] = \
str(m.groupdict()['vid'])
continue
# Desc: ASR 9006 4 Line Card Slot Chassis with V2 AC PEM
p6 = re.compile(r'Desc\: *(?P<desc>[a-zA-Z0-9\-\s]+)$')
m = p6.match(line)
if m:
admin_show_diag_dict['desc'] = \
str(m.groupdict()['desc'])
continue
# CLEI: IPMUP00BRB
# CLEI Code : INM1J10ARA
p7 = re.compile(r'CLEI( +Code\s+)?: +(?P<clei>[a-zA-Z0-9\-]+)$')
m = p7.match(line)
if m:
admin_show_diag_dict['clei'] = \
str(m.groupdict()['clei'])
continue
# Top Assy. Number: 68-4235-02
p8 = re.compile(r'Top +Assy. +Number\:'
' *(?P<top_assy_num>[a-zA-Z0-9\-\s]+)$')
m = p8.match(line)
if m:
admin_show_diag_dict['top_assy_num'] = \
str(m.groupdict()['top_assy_num'])
continue
# PCA: 73-7806-01 rev B0
p9 = re.compile(r'^PCA\: +(?P<pca>[\S ]+)$')
m = p9.match(line)
if m:
admin_show_diag_dict['pca'] = \
str(m.groupdict()['pca'])
continue
# ECI: 459651
p10 = re.compile(r'^ECI\: +(?P<eci>[\S ]+)$')
m = p10.match(line)
if m:
admin_show_diag_dict['eci'] = \
str(m.groupdict()['eci'])
continue
# MAIN: board type 500060
p11 = re.compile(r'^MAIN\: +board +type +(?P<board_type>[\S ]+)$')
m = p11.match(line)
if m:
main_dict = admin_show_diag_dict.setdefault('main', {})
main_dict['board_type'] = \
str(m.groupdict()['board_type'])
continue
# 800-25021-05 rev B0
p12 = re.compile(r'^\S+ +rev +\S+')
m = p12.match(line)
if m:
main_dict = admin_show_diag_dict.setdefault('main', {})
main_dict['part'] = line.strip()
continue
# dev 080366, 080181
p13 = re.compile(r'\s*dev +(?P<dev>[\S ]+)')
m = p13.match(line)
if m:
dev = m.groupdict()['dev']
main_dict = admin_show_diag_dict.setdefault('main', {})
main_flag = True
main_dict['dev'] = dev
continue
# 0 Rack 0-IDPROM Info
p15 = re.compile(r'(?P<rack_num>[0-9]+) +Rack +\d\-IDPROM +Info$')
m15 = p15.match(line)
if m15:
admin_show_diag_dict['rack_num'] = \
int(m15.groupdict()['rack_num'])
continue
# Top +Assembly +Block\:$
p16 = re.compile(r'Top +Assembly +Block\:$')
m16 = p16.match(line)
if m16:
top_assembly_flag = True
top_assembly_dict = admin_show_diag_dict.setdefault('top_assembly_block', {})
continue
# Part Number : 73-101057-02
p17 = re.compile(r'^Part +(n|N)umber(\s+)?\: +(?P<part_number>\S+)$')
m17 = p17.match(line)
if m17:
part_num = str(m17.groupdict()['part_number'])
if top_assembly_flag:
top_assembly_dict['part_number'] = part_num
else:
admin_show_diag_dict['part_number'] = part_num
continue
# Part Revision : D0
p18 = re.compile(r'^Part +(r|R)evision(\s+)?\: +(?P<part_revision>\S+)$')
m18 = p18.match(line)
if m18:
part_rev = str(m18.groupdict()['part_revision'])
if top_assembly_flag:
top_assembly_dict['part_revision'] = part_rev
else:
admin_show_diag_dict['part_revision'] = part_rev
continue
# H/W Version : 1.0
p19 = re.compile(r'^H\/W +[v|V]ersion(\s+)?\: +(?P<hw_version>\S+)$')
m19 = p19.match(line)
if m19:
hw_ver = str(m19.groupdict()['hw_version'])
if top_assembly_flag:
top_assembly_dict['hw_version'] = hw_ver
else:
admin_show_diag_dict['hw_version'] = hw_ver
# Mfg Deviation : 0
p20 = re.compile(r'^[M|m]fg +[D|d]eviation(\s+)?\: '
r'+(?P<mfg_deviation>\S+)$')
m20 = p20.match(line)
if m20:
mfg_dev = str(m20.groupdict()['mfg_deviation'])
top_assembly_dict['mfg_deviation'] = mfg_dev
continue
# Mfg Bits : 1
p21 = re.compile(r'^[M|m]fg +Bits(\s+)?\: +(?P<mfg_bits>\S+)$')
m21 = p21.match(line)
if m21:
mfg_bit = str(m21.groupdict()['mfg_bits'])
top_assembly_dict['mfg_bits'] = mfg_bit
top_assembly_flag = False
continue
return admin_show_diag_dict
# ====================================
# Schema for 'show redundancy summary'
# ====================================
class ShowRedundancySummarySchema(MetaParser):
"""Schema for show redundancy summary"""
schema = {
'node':
{Any():
{'type': str,
Optional('standby_node'): str,
Optional('backup_node'): str,
Optional('node_detail'): str,
},
},
Optional('redundancy_communication'): bool,
}
class ShowRedundancySummary(ShowRedundancySummarySchema):
"""Parser for show redundancy summary"""
cli_command = 'show redundancy summary'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
redundancy_summary = {}
redundancy_communication = False
for line in out.splitlines():
line = line.rstrip()
p0 = re.compile(r'\s*Active.*$')
m = p0.match(line)
if m:
continue
# 0/RSP0/CPU0(A) 0/RSP1/CPU0(S) (Node Not Ready, NSR: Not Configured)
p1 = re.compile(r'\s*(?P<node>[a-zA-Z0-9\/\(\)]+)'
' +(?P<standby_node>[a-zA-Z0-9\/\(\)]+)'
' +\((?P<node_detail>[a-zA-Z\,\:\s]+)\)$')
m = p1.match(line)
if m:
if 'node' not in redundancy_summary:
redundancy_summary['node'] = {}
# Check if node type is active or primary
node = str(m.groupdict()['node']).strip()
if re.search("\(P\)", node):
type = 'primary'
else:
type = 'active'
# Check standby or backup node
backup_node = None
standby_node = str(m.groupdict()['standby_node'])
if re.search("\(B\)", standby_node):
backup_node = standby_node
elif standby_node == 'N/A':
continue
# set everything
redundancy_summary['node'][node] = {}
redundancy_summary['node'][node]['type'] = type
redundancy_summary['node'][node]['standby_node'] = \
standby_node
redundancy_summary['node'][node]['node_detail'] = \
str(m.groupdict()['node_detail'])
if re.search(r'NSR: +Ready', str(m.groupdict()['node_detail'])):
redundancy_communication = True
redundancy_summary['redundancy_communication'] = redundancy_communication
if backup_node is not None:
redundancy_summary['node'][node]['backup_node'] = \
backup_node
continue
# 0/0/CPU0 N/A
p2 = re.compile(r'\s*(?P<node>[a-zA-Z0-9\/\(\)]+)'
' +(?P<standby_node>[a-zA-Z0-9\/\(\)]+)$')
m = p2.match(line)
if m:
if 'node' not in redundancy_summary:
redundancy_summary['node'] = {}
# Check if node type is active or primary
node = str(m.groupdict()['node']).strip()
if re.search("\(P\)", node):
type = 'primary'
else:
type = 'active'
# Check standby or backup node
backup_node = None
standby_node = str(m.groupdict()['standby_node'])
if re.search("\(B\)", standby_node):
backup_node = standby_node
# set everything
redundancy_summary['node'][node] = {}
redundancy_summary['node'][node]['type'] = type
redundancy_summary['node'][node]['standby_node'] = \
standby_node
if backup_node is not None:
redundancy_summary['node'][node]['backup_node'] = \
backup_node
continue
return redundancy_summary
# ============================
# Schema for 'show redundancy'
# ============================
class ShowRedundancySchema(MetaParser):
"""Schema for show redundancy"""
schema = {
'node':
{Any():
{'role': str,
Optional('valid_partner'): str,
Optional('ready'): str,
Optional('group'):
{Any():
{'primary': str,
'backup': str,
'status': str,
},
},
Optional('primary_rmf_state'): str,
Optional('primary_rmf_state_reason'): str,
'last_reload_timestamp': str,
'time_since_last_reload': str,
'node_uptime': str,
'node_uptime_timestamp': str,
'node_uptime_in_seconds': int,
Optional('standby_node'): str,
Optional('backup_process'): str,
Optional('last_switchover_timepstamp'): str,
Optional('time_since_last_switchover'): str,
Optional('standby_node_timestamp'): str,
Optional('time_since_standby_boot'): str,
Optional('standby_node_not_ready'): str,
Optional('time_since_standby_node_not_ready'): str,
Optional('standby_node_ready'):str,
Optional('time_since_standby_node_ready'): str,
Optional('reload_cause'): str
},
},
}
class ShowRedundancy(ShowRedundancySchema):
"""Parser for show redundancy"""
cli_command = 'show redundancy'
exclude = ['node_uptime', 'time_since_standby_boot',
'time_since_last_reload', 'time_since_last_switchover',
'time_since_standby_node_not_ready', 'time_since_standby_node_ready',
'standby_node_not_ready', 'standby_node_ready',
'standby_node_timestamp', 'node_uptime_in_seconds', 'iteration:']
def cli(self,output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
redundancy_dict = {}
for line in out.splitlines():
line = line.rstrip()
# Redundancy information for node 0/RP0/CPU0
p1 = re.compile(r'\s*Redundancy +information +for +node'
' +(?P<node>[a-zA-Z0-9\/]+):$')
m = p1.match(line)
if m:
if 'node' not in redundancy_dict:
redundancy_dict['node'] = {}
node = str(m.groupdict()['node'])
if node not in redundancy_dict['node']:
redundancy_dict['node'][node] = {}
continue
# Node 0/RSP0/CPU0 is in ACTIVE role
p2 = re.compile(r'\s*Node +([a-zA-Z0-9\/]+) +is +in'
' +(?P<role>[a-zA-Z]+) +role$')
m = p2.match(line)
if m:
redundancy_dict['node'][node]['role'] = \
str(m.groupdict()['role'])
continue
# Node Redundancy Partner (0/RSP1/CPU0) is in STANDBY role
p3_1 = re.compile(r'\s*Node *Redundancy *Partner'
' *\((?P<node>[a-zA-Z0-9\/]+)\) *is *in'
' *(?P<role>[a-zA-Z]+) *role$')
m = p3_1.match(line)
if m:
if 'standby' in str(m.groupdict()['role']).lower():
redundancy_dict['node'][node]['standby_node'] = str(m.groupdict()['node'])
continue
# Process Redundancy Partner (0/RSP0/CPU0) is in BACKUP role
p3_3 = re.compile(r'\s*Process *Redundancy *Partner'
' *\((?P<node>[a-zA-Z0-9\/]+)\) *is *in'
' *(?P<role>[a-zA-Z]+) *role$')
m = p3_3.match(line)
if m:
if 'backup' in str(m.groupdict()['role']).lower():
redundancy_dict['node'][node]['backup_process'] = str(m.groupdict()['node'])
continue
# Standby node in 0/RSP1/CPU0 is ready
# Standby node in 0/RSP1/CPU0 is NSR-ready
p3_2 = re.compile(r'\s*Standby *node *in *([a-zA-Z0-9\/]+)'
' *is *(?P<ready>[a-zA-Z\-]+)$')
m = p3_2.match(line)
if m:
redundancy_dict['node'][node]['ready'] = \
str(m.groupdict()['ready'])
continue
# Node 0/RP0/CPU0 has no valid partner
p3 = re.compile(r'\s*Node +([a-zA-Z0-9\/]+) +has +(?P<valid_partner>\S+)'
' +valid +partner$')
m = p3.match(line)
if m:
redundancy_dict['node'][node]['valid_partner'] = \
str(m.groupdict()['valid_partner'])
continue
# v6-routing 0/RSP0/CPU0 N/A Not Ready
p4 = re.compile(r'\s*(?P<group>[a-zA-Z0-9\-]+)'
' +(?P<primary>[A-Z0-9\/]+)'
' +(?P<backup>[A-Z0-9\/]+)'
' +(?P<status>[a-zA-Z\-\s]+)$')
m = p4.match(line)
if m:
if 'group' not in redundancy_dict['node'][node]:
redundancy_dict['node'][node]['group'] = {}
group = str(m.groupdict()['group'])
if group not in redundancy_dict['node'][node]['group']:
redundancy_dict['node'][node]['group'][group] = {}
redundancy_dict['node'][node]['group'][group]['primary'] = \
str(m.groupdict()['primary'])
redundancy_dict['node'][node]['group'][group]['backup'] = \
str(m.groupdict()['backup'])
redundancy_dict['node'][node]['group'][group]['status'] = \
str(m.groupdict()['status'])
continue
# NSR not ready since Backup is not Present
p5 = re.compile(r'\s*NSR +(?P<primary_rmf_state>[a-zA-Z\s]+) +since'
' +(?P<primary_rmf_state_reason>[a-zA-Z\s]+)$')
m = p5.match(line)
if m:
redundancy_dict['node'][node]['primary_rmf_state'] = \
str(m.groupdict()['primary_rmf_state'])
redundancy_dict['node'][node]\
['primary_rmf_state_reason'] = \
str(m.groupdict()['primary_rmf_state_reason'])
continue
# A9K-RSP440-TR reloaded Thu Apr 27 02:14:12 2017: 1 hour, 16 minutes ago
p6 = re.compile(r'\s*(?P<node_name>[a-zA-Z0-9\-]+) +reloaded'
' +(?P<last_reload_timestamp>[a-zA-Z0-9\:\s]+):'
' +(?P<time_since_last_reload>[a-zA-Z0-9\,\s]+)$')
m = p6.match(line)
if m:
redundancy_dict['node'][node]['last_reload_timestamp'] =\
str(m.groupdict()['last_reload_timestamp'])
redundancy_dict['node'][node]['time_since_last_reload'] =\
str(m.groupdict()['time_since_last_reload'])
continue
# Active node booted Thu Apr 27 03:22:37 2017: 8 minutes ago
# Active node booted Thu Jan 11 12:31:59 2018: 5 days, 23 hours, ago
# Active node booted Tue Jan 2 07:32:33 2018: 1 day, 1 hour, 6 minutes ago
# Active node booted Thu Jan 11 12:32:03 2018: 1 week, 4 days, 20 hours, 19 minutes ago
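# Worked example for the seconds computation below: '1 week, 4 days, 20 hours, 19 minutes'
# -> 7*86400 + 4*86400 + 20*3600 + 19*60 = 1023540 seconds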
p7 = re.compile(r'\s*Active +node +booted'
' +(?P<node_uptime_timestamp>[a-zA-Z0-9\:\s]+):'
' +(?P<node_uptime>((?P<ignore>\d+ \w+, *)?((?P<week>\d+) +(week|weeks), )?'
'(((?P<day>\d+) +(day|days))?, )?)?(((?P<hour>\d+) +(hour|hours))?, )?'
'(((?P<minute>\d+) +(minute|minutes))|((?P<second>\d+) +(second|seconds)))?) +ago$')
m = p7.match(line)
if m:
redundancy_dict['node'][node]['node_uptime_timestamp'] = \
str(m.groupdict()['node_uptime_timestamp'])
redundancy_dict['node'][node]['node_uptime'] = \
str(m.groupdict()['node_uptime'])
time_in_seconds = 0
if m.groupdict()['week']:
time_in_seconds += int(m.groupdict()['week']) * 7 * 86400
if m.groupdict()['day']:
time_in_seconds += int(m.groupdict()['day']) * 86400
if m.groupdict()['hour']:
time_in_seconds += int(m.groupdict()['hour']) * 3600
if m.groupdict()['minute']:
time_in_seconds += int(m.groupdict()['minute']) * 60
if m.groupdict()['second']:
time_in_seconds += int(m.groupdict()['second'])
redundancy_dict['node'][node]['node_uptime_in_seconds'] = \
time_in_seconds
continue
# Standby node boot Thu Aug 10 08:29:18 2017: 1 day, 32 minutes ago
p7_1 = re.compile(r'\s*Standby +node +boot'
' +(?P<standby_node_timestamp>[a-zA-Z0-9\:\s]+):'
' +(?P<time_since_standby_boot>[a-zA-Z0-9\,\s]+)$')
m = p7_1.match(line)
if m:
standby_node_timestamp = str(m.groupdict()['standby_node_timestamp'])
time_since_standby_boot = str(m.groupdict()['time_since_standby_boot'])
redundancy_dict['node'][node]['standby_node_timestamp'] = \
standby_node_timestamp
redundancy_dict['node'][node]['time_since_standby_boot'] = \
time_since_standby_boot
continue
# Standby node last went not ready Fri Aug 11 07:13:26 2017: 1 hour, 48 minutes ago
# Standby node last went ready Fri Aug 11 07:13:26 2017: 1 hour, 48 minutes ago
p7_2 = re.compile(r'\s*Standby *node *last *went *not *ready'
' *(?P<standby_node_not_ready>[a-zA-Z0-9\:\s]+):'
' *(?P<time_since_standby_node_not_ready>[a-zA-Z0-9\,\s]+)$')
m = p7_2.match(line)
if m:
standby_node_not_ready = str(m.groupdict()['standby_node_not_ready'])
time_since_standby_node_not_ready = str(m.groupdict()['time_since_standby_node_not_ready'])
redundancy_dict['node'][node]['standby_node_not_ready'] = \
standby_node_not_ready
redundancy_dict['node'][node]['time_since_standby_node_not_ready'] = \
time_since_standby_node_not_ready
continue
p7_3 = re.compile(r'\s*Standby *node *last *went *ready'
' *(?P<standby_node_ready>[a-zA-Z0-9\:\s]+):'
' *(?P<time_since_standby_node_ready>[a-zA-Z0-9\,\s]+)$')
m = p7_3.match(line)
if m:
standby_node_ready = str(m.groupdict()['standby_node_ready'])
time_since_standby_node_ready = str(m.groupdict()['time_since_standby_node_ready'])
redundancy_dict['node'][node]['standby_node_ready'] = \
standby_node_ready
redundancy_dict['node'][node]['time_since_standby_node_ready'] = \
time_since_standby_node_ready
continue
# Last switch-over Thu Apr 27 03:29:57 2017: 1 minute ago
p8 = re.compile(r'\s*Last +switch-over'
' +(?P<last_switchover_timepstamp>[a-zA-Z0-9\:\s]+):'
' +(?P<time_since_last_switchover>[a-zA-Z0-9\,\s]+)$')
m = p8.match(line)
if m:
redundancy_dict['node'][node]['last_switchover_timepstamp'] = \
str(m.groupdict()['last_switchover_timepstamp'])
redundancy_dict['node'][node]['time_since_last_switchover'] = \
str(m.groupdict()['time_since_last_switchover'])
continue
# Active node reload Cause: Initiating switch-over.
p9 = re.compile(r'\s*Active +node +reload *(?:Cause)?:'
' +(?P<reload_cause>[a-zA-Z\-\s]+).$')
m = p9.match(line)
if m:
redundancy_dict['node'][node]['reload_cause'] = \
str(m.groupdict()['reload_cause'])
continue
return redundancy_dict
# ================
# Schema for 'dir'
# ================
class DirSchema(MetaParser):
"""Schema for dir"""
schema = {
'dir': {
'dir_name': str,
'total_bytes': str,
'total_free_bytes': str,
Optional('files'):
{Any():
{Optional('size'): str,
Optional('date'): str,
Optional('permission'): str,
Optional('index'): str,
Optional('time'): str}
},
},
}
class Dir(DirSchema):
"""Parser for dir"""
cli_command = ['dir', 'dir {directory}']
exclude = ['size', 'time', 'total_free_bytes', 'date', 'index']
def cli(self, directory='', output=None):
if output is None:
if directory:
out = self.device.execute(self.cli_command[1].format(directory=directory))
else:
out = self.device.execute(self.cli_command[0])
else:
out = output
# Init vars
dir_dict = {}
for line in out.splitlines():
line = line.rstrip()
# Directory of /misc/scratch
# Directory of disk0a:/usr
p1 = re.compile(r'\s*Directory +of'
' +(?P<dir_name>[a-zA-Z0-9\:\/]+)$')
m = p1.match(line)
if m:
if 'dir' not in dir_dict:
dir_dict['dir'] = {}
dir_dict['dir']['dir_name'] = str(m.groupdict()['dir_name'])
continue
# 1012660 kbytes total (939092 kbytes free)
# 2562719744 bytes total (1918621184 bytes free)
p2 = re.compile(r'\s*(?P<total_bytes>[0-9]+ +(kbytes|bytes))'
' +total +\((?P<total_free_bytes>[0-9]+'
' +(kbytes|bytes)) +free\)$')
m = p2.match(line)
if m:
dir_dict['dir']['total_bytes'] = \
str(m.groupdict()['total_bytes'])
dir_dict['dir']['total_free_bytes'] = \
str(m.groupdict()['total_free_bytes'])
continue
# 20 -rw-r--r-- 1 773 May 10 2017 cvac.log
# 15 lrwxrwxrwx 1 12 May 10 2017 config -> /misc/config
# 11 drwx------ 2 16384 Mar 28 12:23 lost+found
# 14 -rw-r--r--. 1 10429 Oct 26 16:17 pnet_cfg.log
p3 = re.compile(r'^\s*(?P<index>[0-9]+) +(?P<permission>[a-z\-]+)(\.)? '
'+(?P<unknown>[0-9]+) +(?P<size>[0-9]+) +(?P<month>[a-zA-Z]+) '
'+(?P<day>[0-9]+) +(?P<year>[0-9\:]+) '
'+(?P<file>[a-zA-Z0-9\.\/\_\-\+\>\s]+)$')
m = p3.match(line)
if m:
file = m.groupdict()['file']
date = m.groupdict()['month'].strip() \
+ ' ' + m.groupdict()['day'].strip() + ' ' \
+ m.groupdict()['year'].strip()
if 'files' not in dir_dict['dir']:
dir_dict['dir']['files'] = {}
dir_dict['dir']['files'][file] = {}
dir_dict['dir']['files'][file]['size'] = m.groupdict()['size']
dir_dict['dir']['files'][file]['permission'] = \
m.groupdict()['permission']
dir_dict['dir']['files'][file]['index'] = m.groupdict()['index']
dir_dict['dir']['files'][file]['date'] = date
continue
return dir_dict
# vim: ft=python et sw=4
| 40.449322
| 125
| 0.465223
|
6ad5444041e41103d8caef6b528585a77533e037
| 17,586
|
py
|
Python
|
google/ads/googleads/v4/services/services/keyword_view_service/client.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/services/services/keyword_view_service/client.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/services/services/keyword_view_service/client.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v4.resources.types import keyword_view
from google.ads.googleads.v4.services.types import keyword_view_service
from .transports.base import KeywordViewServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import KeywordViewServiceGrpcTransport
class KeywordViewServiceClientMeta(type):
"""Metaclass for the KeywordViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[KeywordViewServiceTransport]]
_transport_registry["grpc"] = KeywordViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[KeywordViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class KeywordViewServiceClient(metaclass=KeywordViewServiceClientMeta):
"""Service to manage keyword views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
KeywordViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
KeywordViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> KeywordViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
KeywordViewServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def keyword_view_path(customer: str, keyword_view: str,) -> str:
"""Return a fully-qualified keyword_view string."""
return "customers/{customer}/keywordViews/{keyword_view}".format(
customer=customer, keyword_view=keyword_view,
)
@staticmethod
def parse_keyword_view_path(path: str) -> Dict[str, str]:
"""Parse a keyword_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer>.+?)/keywordViews/(?P<keyword_view>.+?)$",
path,
)
return m.groupdict() if m else {}
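    # Illustrative note (not part of the generated client): the two helpers above are
    # inverses for a hypothetical customer/keyword-view pair, e.g.
    #   keyword_view_path("1234567890", "111222333")
    #       -> "customers/1234567890/keywordViews/111222333"
    #   parse_keyword_view_path("customers/1234567890/keywordViews/111222333")
    #       -> {"customer": "1234567890", "keyword_view": "111222333"}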
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, KeywordViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the keyword view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.KeywordViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, KeywordViewServiceTransport):
# transport is a KeywordViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = KeywordViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
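    # Illustrative note (not part of the generated client): with the logic above,
    # KeywordViewServiceClient() with no arguments resolves credentials from the
    # environment and talks to DEFAULT_ENDPOINT; setting GOOGLE_API_USE_MTLS_ENDPOINT=always
    # or passing client_options={"api_endpoint": ...} overrides that choice as described
    # in the __init__ docstring.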
def get_keyword_view(
self,
request: keyword_view_service.GetKeywordViewRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> keyword_view.KeywordView:
r"""Returns the requested keyword view in full detail.
Args:
request (:class:`google.ads.googleads.v4.services.types.GetKeywordViewRequest`):
The request object. Request message for
[KeywordViewService.GetKeywordView][google.ads.googleads.v4.services.KeywordViewService.GetKeywordView].
resource_name (:class:`str`):
Required. The resource name of the
keyword view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v4.resources.types.KeywordView:
A keyword view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a keyword_view_service.GetKeywordViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, keyword_view_service.GetKeywordViewRequest):
request = keyword_view_service.GetKeywordViewRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_keyword_view]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("KeywordViewServiceClient",)
| 40.708333
| 120
| 0.637439
|
df09567d4892044b4f5d5cdba93f80b181ef202d
| 6,160
|
py
|
Python
|
Controller/NovoFornecedorController.py
|
rennancockles/gigapy
|
4fb0cd97e689c6460254a466a7a8d9137c2392b1
|
[
"MIT"
] | null | null | null |
Controller/NovoFornecedorController.py
|
rennancockles/gigapy
|
4fb0cd97e689c6460254a466a7a8d9137c2392b1
|
[
"MIT"
] | null | null | null |
Controller/NovoFornecedorController.py
|
rennancockles/gigapy
|
4fb0cd97e689c6460254a466a7a8d9137c2392b1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from View import NovoFornecedorView
from DAO import FornecedorDAO
from Model.TelefoneModel import Telefone
from Model.EmailModel import Email
from Model.FornecedorModel import Fornecedor
from PyQt4 import QtGui
class NovoFornecedor(QtGui.QMainWindow, NovoFornecedorView.Ui_NovoFornecedor):
def __init__(self, parent=None):
super(NovoFornecedor, self).__init__(parent)
self.setupUi(self)
self.parent = parent
self.telefoneId = 0
self.telefone_row_count = 0
self.telefones = []
self.emailId = 0
self.email_row_count = 0
self.emails = []
self.btnCancelar.clicked.connect(self.close_view)
self.btnSalvar.clicked.connect(self.salvar)
self.btnTelefoneAdd.clicked.connect(self.add_telefone)
self.btnTelefoneRemove.clicked.connect(self.remove_telefone)
self.btnEmailAdd.clicked.connect(self.add_email)
self.btnEmailRemove.clicked.connect(self.remove_email)
def close_view(self):
self.close()
def salvar(self):
nome = self.tbNome.toPlainText()
desc = self.tbDescricao.toPlainText()
cidade = self.tbCidade.toPlainText()
endereco = self.tbEndereco.toPlainText()
bairro = self.tbBairro.toPlainText()
numero = self.tbNumero.toPlainText()
if nome == '' or desc == '' or cidade == '' or endereco == '' or bairro == '' or numero == '':
QtGui.QMessageBox.warning(self, "Erro", "Preencha os campos corretamente!")
return
parsed_num = self.parse_numero(numero)
if parsed_num is None:
return
fornecedor = Fornecedor(nome=nome, descricao=desc, cidade=cidade, endereco=endereco, bairro=bairro, numero=numero)
fornecedor.add_emails(self.emails)
fornecedor.add_telefones(self.telefones)
FornecedorDAO.insert(fornecedor)
print fornecedor
self.close_view()
def add_telefone(self):
ddd = self.verify_ddd(self.tbTelefoneDDD.toPlainText())
numero = self.verify_telefone(self.tbTelefoneNumero.toPlainText())
referencia = self.tbTelefoneReferencia.toPlainText()
if numero is None or ddd is None:
return
if ddd == '' or numero == '' or referencia == '':
QtGui.QMessageBox.warning(self, "Erro", "Preencha os campos corretamente!")
return
telefone = Telefone(ddd=ddd, numero=numero, referencia=referencia)
self.telefones.append(telefone)
self.telefoneId += 1
row = self.telefone_row_count
self.telefone_row_count += 1
self.tblTelefone.setRowCount(self.telefone_row_count)
item = QtGui.QTableWidgetItem()
self.tblTelefone.setVerticalHeaderItem(row, item)
item = QtGui.QTableWidgetItem()
self.tblTelefone.setItem(row, 0, item)
item = QtGui.QTableWidgetItem()
self.tblTelefone.setItem(row, 1, item)
item = QtGui.QTableWidgetItem()
self.tblTelefone.setItem(row, 2, item)
item = self.tblTelefone.item(row, 0)
item.setText(telefone.ddd)
item = self.tblTelefone.item(row, 1)
item.setText(telefone.numero)
item = self.tblTelefone.item(row, 2)
item.setText(telefone.referencia)
self.tbTelefoneDDD.clear()
self.tbTelefoneNumero.clear()
self.tbTelefoneReferencia.clear()
def remove_telefone(self):
row = self.tblTelefone.currentRow()
self.telefone_row_count -= 1
self.telefones.pop(row)
self.tblTelefone.removeRow(row)
def add_email(self):
email = self.verify_email(self.tbEmail.toPlainText())
referencia = self.tbEmailReferencia.toPlainText()
if email is None:
return
if email == '' or referencia == '':
QtGui.QMessageBox.warning(self, "Erro", "Preencha os campos corretamente!")
return
email = Email(email=email, referencia=referencia)
self.emails.append(email)
self.emailId += 1
row = self.email_row_count
self.email_row_count += 1
self.tblEmail.setRowCount(self.email_row_count)
item = QtGui.QTableWidgetItem()
self.tblEmail.setVerticalHeaderItem(row, item)
item = QtGui.QTableWidgetItem()
self.tblEmail.setItem(row, 0, item)
item = QtGui.QTableWidgetItem()
self.tblEmail.setItem(row, 1, item)
item = self.tblEmail.item(row, 0)
item.setText(email.email)
item = self.tblEmail.item(row, 1)
item.setText(email.referencia)
self.tbEmail.clear()
self.tbEmailReferencia.clear()
def remove_email(self):
row = self.tblEmail.currentRow()
self.email_row_count -= 1
self.emails.pop(row)
self.tblEmail.removeRow(row)
def parse_numero(self, string):
try:
i = int(string)
return i
except:
QtGui.QMessageBox.warning(self, "Erro", "Numero deve ser do tipo inteiro!".format(string))
return
def verify_email(self, email):
import re
addressToVerify = email
        match = re.match(r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$', addressToVerify)
if match is None:
QtGui.QMessageBox.warning(self, "Erro", "Email invalido!")
return None
else:
return email
def verify_telefone(self, telefone):
import re
addressToVerify = telefone
        match = re.match(r'(\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4})', addressToVerify)
if match is None:
QtGui.QMessageBox.warning(self, "Erro", "Telefone invalido!")
return None
else:
return telefone
def verify_ddd(self, ddd):
import re
addressToVerify = ddd
        match = re.match(r'^\d{2}$', addressToVerify)
if match is None:
QtGui.QMessageBox.warning(self, "Erro", "DDD invalido!")
return None
else:
return ddd
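# Illustrative sketch (not part of the original controller): a standalone check of the
# validation patterns used by verify_ddd and verify_telefone above, run against made-up
# values. It exercises only the regular expressions, so no PyQt4 widgets are needed.
if __name__ == '__main__':
    import re
    assert re.match(r'^\d{2}$', '21') is not None      # two-digit DDD accepted
    assert re.match(r'^\d{2}$', '021') is None         # anything else rejected
    telefone_pattern = (r'(\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*'
                        r'\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4})')
    assert re.match(telefone_pattern, '555-123-4567') is not None   # 3-3-4 digit form accepted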
| 31.428571
| 132
| 0.618994
|
447ecf981b52993051e05b9b3407b5ba591ea2ee
| 3,322
|
py
|
Python
|
saleor/menu/models.py
|
mmmcorpsvit/saleor
|
09e40a2af549109fbc2f8e82c68a195053e8224e
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/menu/models.py
|
mmmcorpsvit/saleor
|
09e40a2af549109fbc2f8e82c68a195053e8224e
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/menu/models.py
|
mmmcorpsvit/saleor
|
09e40a2af549109fbc2f8e82c68a195053e8224e
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.utils.translation import pgettext_lazy
from mptt.managers import TreeManager
from mptt.models import MPTTModel
from ..core.models import SortableModel
from ..core.utils.translations import TranslationProxy
from ..page.models import Page
from ..product.models import Category, Collection
class Menu(models.Model):
name = models.CharField(max_length=128)
json_content = JSONField(blank=True, default=dict)
class Meta:
permissions = ((
'manage_menus', pgettext_lazy(
'Permission description', 'Manage navigation.')),)
def __str__(self):
return self.name
class MenuItem(MPTTModel, SortableModel):
menu = models.ForeignKey(
Menu, related_name='items', on_delete=models.CASCADE)
name = models.CharField(max_length=128)
parent = models.ForeignKey(
'self', null=True, blank=True, related_name='children',
on_delete=models.CASCADE)
# not mandatory fields, usage depends on what type of link is stored
url = models.URLField(max_length=256, blank=True, null=True)
category = models.ForeignKey(
Category, blank=True, null=True, on_delete=models.CASCADE)
collection = models.ForeignKey(
Collection, blank=True, null=True, on_delete=models.CASCADE)
page = models.ForeignKey(
Page, blank=True, null=True, on_delete=models.CASCADE)
objects = models.Manager()
tree = TreeManager()
translated = TranslationProxy()
class Meta:
ordering = ('sort_order',)
app_label = 'menu'
def __str__(self):
return self.name
def get_ordering_queryset(self):
return (
self.menu.items.all() if not self.parent
else self.parent.children.all())
@property
def linked_object(self):
return self.category or self.collection or self.page
@property
def destination_display(self):
linked_object = self.linked_object
if not linked_object:
prefix = pgettext_lazy('Link object type description', 'URL: ')
return prefix + self.url
if isinstance(linked_object, Category):
prefix = pgettext_lazy(
'Link object type description', 'Category: ')
elif isinstance(linked_object, Collection):
prefix = pgettext_lazy(
'Link object type description', 'Collection: ')
else:
prefix = pgettext_lazy(
'Link object type description', 'Page: ')
return prefix + str(linked_object)
def get_url(self):
linked_object = self.linked_object
return linked_object.get_absolute_url() if linked_object else self.url
class MenuItemTranslation(models.Model):
language_code = models.CharField(max_length=10)
menu_item = models.ForeignKey(
MenuItem, related_name='translations', on_delete=models.CASCADE)
name = models.CharField(max_length=128)
class Meta:
unique_together = (('language_code', 'menu_item'),)
def __repr__(self):
class_ = type(self)
return '%s(pk=%r, name=%r, menu_item_pk=%r)' % (
class_.__name__, self.pk, self.name, self.menu_item_id)
def __str__(self):
return self.name
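# Illustrative note (not part of the original module): for a MenuItem, the linked_object
# property above resolves in the order category -> collection -> page, and get_url() falls
# back to the plain `url` field only when none of those foreign keys is set. So a
# hypothetical MenuItem(name='Sale', collection=some_collection, url='https://example.com')
# would link to the collection's absolute URL rather than to the literal url value.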
| 31.942308
| 78
| 0.665563
|
01cb3746d1820266b80596bef731013d68387939
| 439
|
py
|
Python
|
pol/ptst.py
|
mlares/CMB_polarization
|
936d17d0be81564dbae96d8aae0cb9f824f8a94d
|
[
"MIT"
] | null | null | null |
pol/ptst.py
|
mlares/CMB_polarization
|
936d17d0be81564dbae96d8aae0cb9f824f8a94d
|
[
"MIT"
] | null | null | null |
pol/ptst.py
|
mlares/CMB_polarization
|
936d17d0be81564dbae96d8aae0cb9f824f8a94d
|
[
"MIT"
] | null | null | null |
from Parser import Parser
from sys import argv
#import PixelSky as pxs
import cmfg
#from configparser import ConfigParser
if len(argv) > 1:
config = Parser(argv[1])
else:
config = Parser()
#filename = pxs.check_file(argv)
#config = ConfigParser()
#config.read(filename)
#config = Parser('../set/POL03.ini')
#X = cmfg.profile2d(config)
#X.load_centers()
#X.select_subsample_centers()
| 19.086957
| 38
| 0.653759
|
dc040750089650045d2f2ec4f082ac76b18ea976
| 9,014
|
py
|
Python
|
venv/Lib/site-packages/celery/bin/worker.py
|
Verckolf/MyInterfaceTest
|
e05674bd673a6a43cfb33f7cb4318886ba92a05c
|
[
"MIT"
] | 39
|
2016-12-05T14:36:37.000Z
|
2021-07-29T18:22:34.000Z
|
microblog/flask/venv/lib/python2.7/site-packages/celery/bin/worker.py
|
johankaito/fufuka
|
32a96ecf98ce305c2206c38443e58fdec88c788d
|
[
"Apache-2.0"
] | 68
|
2016-12-12T20:38:47.000Z
|
2020-07-26T18:28:49.000Z
|
microblog/flask/venv/lib/python2.7/site-packages/celery/bin/worker.py
|
johankaito/fufuka
|
32a96ecf98ce305c2206c38443e58fdec88c788d
|
[
"Apache-2.0"
] | 120
|
2016-08-18T14:53:03.000Z
|
2020-06-16T13:27:20.000Z
|
# -*- coding: utf-8 -*-
"""
The :program:`celery worker` command (previously known as ``celeryd``)
.. program:: celery worker
.. seealso::
See :ref:`preload-options`.
.. cmdoption:: -c, --concurrency
Number of child processes processing the queue. The default
is the number of CPUs available on your system.
.. cmdoption:: -P, --pool
Pool implementation:
prefork (default), eventlet, gevent, solo or threads.
.. cmdoption:: -f, --logfile
Path to log file. If no logfile is specified, `stderr` is used.
.. cmdoption:: -l, --loglevel
Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
`ERROR`, `CRITICAL`, or `FATAL`.
.. cmdoption:: -n, --hostname
Set custom hostname, e.g. 'w1.%h'. Expands: %h (hostname),
    %n (name) and %d (domain).
.. cmdoption:: -B, --beat
Also run the `celery beat` periodic task scheduler. Please note that
there must only be one instance of this service.
.. cmdoption:: -Q, --queues
List of queues to enable for this worker, separated by comma.
By default all configured queues are enabled.
Example: `-Q video,image`
.. cmdoption:: -I, --include
Comma separated list of additional modules to import.
Example: -I foo.tasks,bar.tasks
.. cmdoption:: -s, --schedule
Path to the schedule database if running with the `-B` option.
Defaults to `celerybeat-schedule`. The extension ".db" may be
appended to the filename.
.. cmdoption:: -O
Apply optimization profile. Supported: default, fair
.. cmdoption:: --scheduler
Scheduler class to use. Default is celery.beat.PersistentScheduler
.. cmdoption:: -S, --statedb
Path to the state database. The extension '.db' may
be appended to the filename. Default: {default}
.. cmdoption:: -E, --events
Send events that can be captured by monitors like :program:`celery events`,
`celerymon`, and others.
.. cmdoption:: --without-gossip
Do not subscribe to other workers events.
.. cmdoption:: --without-mingle
Do not synchronize with other workers at startup.
.. cmdoption:: --without-heartbeat
Do not send event heartbeats.
.. cmdoption:: --heartbeat-interval
Interval in seconds at which to send worker heartbeat
.. cmdoption:: --purge
Purges all waiting tasks before the daemon is started.
**WARNING**: This is unrecoverable, and the tasks will be
deleted from the messaging server.
.. cmdoption:: --time-limit
Enables a hard time limit (in seconds int/float) for tasks.
.. cmdoption:: --soft-time-limit
Enables a soft time limit (in seconds int/float) for tasks.
.. cmdoption:: --maxtasksperchild
Maximum number of tasks a pool worker can execute before it's
terminated and replaced by a new worker.
.. cmdoption:: --pidfile
Optional file used to store the workers pid.
The worker will not start if this file already exists
and the pid is still alive.
.. cmdoption:: --autoscale
Enable autoscaling by providing
max_concurrency, min_concurrency. Example::
--autoscale=10,3
(always keep 3 processes, but grow to 10 if necessary)
.. cmdoption:: --autoreload
Enable autoreloading.
.. cmdoption:: --no-execv
Don't do execv after multiprocessing child fork.
"""
from __future__ import absolute_import, unicode_literals
import sys
from celery import concurrency
from celery.bin.base import Command, Option, daemon_options
from celery.bin.celeryd_detach import detached_celeryd
from celery.five import string_t
from celery.platforms import maybe_drop_privileges
from celery.utils import default_nodename
from celery.utils.log import LOG_LEVELS, mlevel
__all__ = ['worker', 'main']
__MODULE_DOC__ = __doc__
class worker(Command):
"""Start worker instance.
Examples::
celery worker --app=proj -l info
celery worker -A proj -l info -Q hipri,lopri
celery worker -A proj --concurrency=4
celery worker -A proj --concurrency=1000 -P eventlet
celery worker --autoscale=10,0
"""
doc = __MODULE_DOC__ # parse help from this too
namespace = 'celeryd'
enable_config_from_cmdline = True
supports_args = False
def run_from_argv(self, prog_name, argv=None, command=None):
command = sys.argv[0] if command is None else command
argv = sys.argv[1:] if argv is None else argv
# parse options before detaching so errors can be handled.
options, args = self.prepare_args(
*self.parse_options(prog_name, argv, command))
self.maybe_detach([command] + argv)
return self(*args, **options)
def maybe_detach(self, argv, dopts=['-D', '--detach']):
if any(arg in argv for arg in dopts):
argv = [v for v in argv if v not in dopts]
# will never return
detached_celeryd(self.app).execute_from_commandline(argv)
raise SystemExit(0)
def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
loglevel=None, logfile=None, pidfile=None, state_db=None,
**kwargs):
maybe_drop_privileges(uid=uid, gid=gid)
# Pools like eventlet/gevent needs to patch libs as early
# as possible.
pool_cls = (concurrency.get_implementation(pool_cls) or
self.app.conf.CELERYD_POOL)
if self.app.IS_WINDOWS and kwargs.get('beat'):
self.die('-B option does not work on Windows. '
'Please run celery beat as a separate service.')
hostname = self.host_format(default_nodename(hostname))
if loglevel:
try:
loglevel = mlevel(loglevel)
except KeyError: # pragma: no cover
self.die('Unknown level {0!r}. Please use one of {1}.'.format(
loglevel, '|'.join(
l for l in LOG_LEVELS if isinstance(l, string_t))))
return self.app.Worker(
hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
logfile=logfile, # node format handled by celery.app.log.setup
pidfile=self.node_format(pidfile, hostname),
state_db=self.node_format(state_db, hostname), **kwargs
).start()
def with_pool_option(self, argv):
# this command support custom pools
# that may have to be loaded as early as possible.
return (['-P'], ['--pool'])
def get_options(self):
conf = self.app.conf
return (
Option('-c', '--concurrency',
default=conf.CELERYD_CONCURRENCY, type='int'),
Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'),
Option('--purge', '--discard', default=False, action='store_true'),
Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL),
Option('-n', '--hostname'),
Option('-B', '--beat', action='store_true'),
Option('-s', '--schedule', dest='schedule_filename',
default=conf.CELERYBEAT_SCHEDULE_FILENAME),
Option('--scheduler', dest='scheduler_cls'),
Option('-S', '--statedb',
default=conf.CELERYD_STATE_DB, dest='state_db'),
Option('-E', '--events', default=conf.CELERY_SEND_EVENTS,
action='store_true', dest='send_events'),
Option('--time-limit', type='float', dest='task_time_limit',
default=conf.CELERYD_TASK_TIME_LIMIT),
Option('--soft-time-limit', dest='task_soft_time_limit',
default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='float'),
Option('--maxtasksperchild', dest='max_tasks_per_child',
default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'),
Option('--queues', '-Q', default=[]),
Option('--exclude-queues', '-X', default=[]),
Option('--include', '-I', default=[]),
Option('--autoscale'),
Option('--autoreload', action='store_true'),
Option('--no-execv', action='store_true', default=False),
Option('--without-gossip', action='store_true', default=False),
Option('--without-mingle', action='store_true', default=False),
Option('--without-heartbeat', action='store_true', default=False),
Option('--heartbeat-interval', type='int'),
Option('-O', dest='optimization'),
Option('-D', '--detach', action='store_true'),
) + daemon_options() + tuple(self.app.user_options['worker'])
def main(app=None):
# Fix for setuptools generated scripts, so that it will
# work with multiprocessing fork emulation.
# (see multiprocessing.forking.get_preparation_data())
if __name__ != '__main__': # pragma: no cover
sys.modules['__main__'] = sys.modules[__name__]
from billiard import freeze_support
freeze_support()
worker(app=app).execute_from_commandline()
if __name__ == '__main__': # pragma: no cover
main()
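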
| 33.261993
| 79
| 0.637342
|
49a05265bb854182d7f2865ec9a6936496e2d54e
| 812
|
py
|
Python
|
python/opencv/color-detection/detect_color.py
|
lijiansong/lang
|
e255709da2b12e09dea45f86d54f77a19b96f13b
|
[
"WTFPL"
] | 1
|
2020-01-09T03:22:09.000Z
|
2020-01-09T03:22:09.000Z
|
python/opencv/color-detection/detect_color.py
|
lijiansong/lang
|
e255709da2b12e09dea45f86d54f77a19b96f13b
|
[
"WTFPL"
] | null | null | null |
python/opencv/color-detection/detect_color.py
|
lijiansong/lang
|
e255709da2b12e09dea45f86d54f77a19b96f13b
|
[
"WTFPL"
] | null | null | null |
# USAGE
# python3 detect_color.py
# (the input image 'pokemon_games.png' is read from the working directory below)
import numpy as np
import cv2
# load the image
image = cv2.imread('pokemon_games.png')
# define the list of boundaries
boundaries = [
([17, 15, 100], [50, 56, 200]),
([86, 31, 4], [220, 88, 50]),
([25, 146, 190], [62, 174, 250]),
([103, 86, 65], [145, 133, 128])
]
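# Note (added for clarity): cv2.imread returns pixels in BGR order, so each
# (lower, upper) pair above is interpreted as [B, G, R] bounds, not RGB.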
# loop over the boundaries
for (lower, upper) in boundaries:
# create NumPy arrays from the boundaries
lower = np.array(lower, dtype="uint8")
upper = np.array(upper, dtype="uint8")
# find the colors within the specified boundaries and apply
# the mask
mask = cv2.inRange(image, lower, upper)
output = cv2.bitwise_and(image, image, mask=mask)
# show the images
cv2.imshow("images", np.hstack([image, output]))
cv2.waitKey(0)
| 24.606061
| 63
| 0.641626
|
86719db4609affe2ab17f4139b0925e50fea220f
| 4,454
|
py
|
Python
|
JavaScripts/ImageCollection/LandsatSimpleComposite.py
|
OIEIEIO/earthengine-py-notebooks
|
5d6c5cdec0c73bf02020ee17d42c9e30d633349f
|
[
"MIT"
] | 1,008
|
2020-01-27T02:03:18.000Z
|
2022-03-24T10:42:14.000Z
|
JavaScripts/ImageCollection/LandsatSimpleComposite.py
|
rafatieppo/earthengine-py-notebooks
|
99fbc4abd1fb6ba41e3d8a55f8911217353a3237
|
[
"MIT"
] | 8
|
2020-02-01T20:18:18.000Z
|
2021-11-23T01:48:02.000Z
|
JavaScripts/ImageCollection/LandsatSimpleComposite.py
|
rafatieppo/earthengine-py-notebooks
|
99fbc4abd1fb6ba41e3d8a55f8911217353a3237
|
[
"MIT"
] | 325
|
2020-01-27T02:03:36.000Z
|
2022-03-25T20:33:33.000Z
|
# %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/JavaScripts/ImageCollection/LandsatSimpleComposite.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/ImageCollection/LandsatSimpleComposite.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/ImageCollection/LandsatSimpleComposite.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
"""
# %%
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as geemap
except:
import geemap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
"""
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = geemap.Map(center=[40,-100], zoom=4)
Map
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
# Composite 6 months of Landsat 8.
# Note that the input to simpleComposite is raw data.
l8 = ee.ImageCollection('LANDSAT/LC08/C01/T1')
# The asFloat parameter gives floating-point TOA output instead of
# the UINT8 outputs of the default simpleComposite().
composite = ee.Algorithms.Landsat.simpleComposite(
    collection=l8.filterDate('2015-1-1', '2015-7-1'),
    asFloat=True)
# Pick a spot with lots of clouds.
Map.setCenter(-47.6735, -0.6344, 12)
# Display a composite with a band combination chosen from:
# https://landsat.usgs.gov/how-do-landsat-8-band-combinations-differ-landsat-7-or-landsat-5-satellite-data
Map.addLayer(composite, {'bands': ['B6', 'B5', 'B4'], 'max': [0.3, 0.4, 0.3]})
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| 50.613636
| 1,021
| 0.748541
|
4970a28b266621d087080991a28c4ba7bbf93393
| 30,399
|
py
|
Python
|
guts/api/xmlutil.py
|
smallwormer/stable-liberty-guts
|
e635b710cdd210f70e9d50c3b85fffdeb53e8f01
|
[
"Apache-2.0"
] | null | null | null |
guts/api/xmlutil.py
|
smallwormer/stable-liberty-guts
|
e635b710cdd210f70e9d50c3b85fffdeb53e8f01
|
[
"Apache-2.0"
] | null | null | null |
guts/api/xmlutil.py
|
smallwormer/stable-liberty-guts
|
e635b710cdd210f70e9d50c3b85fffdeb53e8f01
|
[
"Apache-2.0"
] | 1
|
2022-03-03T05:41:31.000Z
|
2022-03-03T05:41:31.000Z
|
# Copyright (c) 2015 Aptira Pty Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import re
from lxml import etree
import six
from guts.i18n import _
from guts import utils
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
XMLNS_MIGRATION_V1 = ('http://docs.openstack.org/api/guts/1.0/'
'content')
XMLNS_MIGRATION_V2 = ('http://docs.openstack.org/api/guts/2.0/'
'content')
_split_pattern = re.compile(r'([^:{]*{[^}]*}[^:]*|[^:]+)')
def validate_schema(xml, schema_name):
if isinstance(xml, str):
xml = etree.fromstring(xml)
base_path = 'guts/api/schemas/v1.1/'
if schema_name in ('atom', 'atom-link'):
base_path = 'guts/api/schemas/'
schema_path = os.path.join(utils.gutsdir(),
'%s%s.rng' % (base_path, schema_name))
schema_doc = etree.parse(schema_path)
relaxng = etree.RelaxNG(schema_doc)
relaxng.assertValid(xml)
class Selector(object):
"""Selects datum to operate on from an object."""
def __init__(self, *chain):
"""Initialize the selector.
Each argument is a subsequent index into the object.
"""
self.chain = chain
def __repr__(self):
"""Return a representation of the selector."""
return "Selector" + repr(self.chain)
def __call__(self, obj, do_raise=False):
"""Select a datum to operate on.
Selects the relevant datum within the object.
:param obj: The object from which to select the object.
:param do_raise: If False (the default), return None if the
indexed datum does not exist. Otherwise,
raise a KeyError.
"""
# Walk the selector list
for elem in self.chain:
# If it's callable, call it
if callable(elem):
obj = elem(obj)
else:
# Use indexing
try:
obj = obj[elem]
except (KeyError, IndexError):
# No sense going any further
if do_raise:
# Convert to a KeyError, for consistency
raise KeyError(elem)
return None
# Return the finally-selected object
return obj
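# Illustrative note (not part of the original module): a Selector simply walks a chain of
# keys/indexes, e.g. Selector('server', 'name')({'server': {'name': 'api-1'}}) returns
# 'api-1'. A missing key yields None unless do_raise=True, in which case a KeyError is
# raised; the example values here are made up.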
def get_items(obj):
"""Get items in obj."""
return list(obj.items())
class EmptyStringSelector(Selector):
"""Returns the empty string if Selector would return None."""
def __call__(self, obj, do_raise=False):
"""Returns empty string if the selected value does not exist."""
try:
return super(EmptyStringSelector, self).__call__(obj, True)
except KeyError:
return ""
class ConstantSelector(object):
"""Returns a constant."""
def __init__(self, value):
"""Initialize the selector.
:param value: The value to return.
"""
self.value = value
def __repr__(self):
"""Return a representation of the selector."""
return repr(self.value)
def __call__(self, _obj, _do_raise=False):
"""Select a datum to operate on.
Returns a constant value. Compatible with
Selector.__call__().
"""
return self.value
class TemplateElement(object):
"""Represent an element in the template."""
def __init__(self, tag, attrib=None, selector=None, subselector=None,
**extra):
"""Initialize an element.
Initializes an element in the template. Keyword arguments
specify attributes to be set on the element; values must be
callables. See TemplateElement.set() for more information.
:param tag: The name of the tag to create.
:param attrib: An optional dictionary of element attributes.
:param selector: An optional callable taking an object and
optional boolean do_raise indicator and
returning the object bound to the element.
:param subselector: An optional callable taking an object and
optional boolean do_raise indicator and
returning the object bound to the element.
This is used to further refine the datum
object returned by selector in the event
that it is a list of objects.
"""
# Convert selector into a Selector
if selector is None:
selector = Selector()
elif not callable(selector):
selector = Selector(selector)
# Convert subselector into a Selector
if subselector is not None and not callable(subselector):
subselector = Selector(subselector)
self.tag = tag
self.selector = selector
self.subselector = subselector
self.attrib = {}
self._text = None
self._children = []
self._childmap = {}
# Run the incoming attributes through set() so that they
# become selectorized
if not attrib:
attrib = {}
attrib.update(extra)
for k, v in attrib.items():
self.set(k, v)
def __repr__(self):
"""Return a representation of the template element."""
return ('<%s.%s %r at %#x>' %
(self.__class__.__module__, self.__class__.__name__,
self.tag, id(self)))
def __len__(self):
"""Return the number of child elements."""
return len(self._children)
def __contains__(self, key):
"""Determine whether a child node named by key exists."""
return key in self._childmap
def __getitem__(self, idx):
"""Retrieve a child node by index or name."""
if isinstance(idx, six.string_types):
# Allow access by node name
return self._childmap[idx]
else:
return self._children[idx]
def append(self, elem):
"""Append a child to the element."""
# Unwrap templates...
elem = elem.unwrap()
# Avoid duplications
if elem.tag in self._childmap:
raise KeyError(elem.tag)
self._children.append(elem)
self._childmap[elem.tag] = elem
def extend(self, elems):
"""Append children to the element."""
# Pre-evaluate the elements
elemmap = {}
elemlist = []
for elem in elems:
# Unwrap templates...
elem = elem.unwrap()
# Avoid duplications
if elem.tag in self._childmap or elem.tag in elemmap:
raise KeyError(elem.tag)
elemmap[elem.tag] = elem
elemlist.append(elem)
# Update the children
self._children.extend(elemlist)
self._childmap.update(elemmap)
def insert(self, idx, elem):
"""Insert a child element at the given index."""
# Unwrap templates...
elem = elem.unwrap()
# Avoid duplications
if elem.tag in self._childmap:
raise KeyError(elem.tag)
self._children.insert(idx, elem)
self._childmap[elem.tag] = elem
def remove(self, elem):
"""Remove a child element."""
# Unwrap templates...
elem = elem.unwrap()
# Check if element exists
if elem.tag not in self._childmap or self._childmap[elem.tag] != elem:
raise ValueError(_('element is not a child'))
self._children.remove(elem)
del self._childmap[elem.tag]
def get(self, key):
"""Get an attribute.
Returns a callable which performs datum selection.
:param key: The name of the attribute to get.
"""
return self.attrib[key]
def set(self, key, value=None):
"""Set an attribute.
:param key: The name of the attribute to set.
:param value: A callable taking an object and optional boolean
do_raise indicator and returning the datum bound
to the attribute. If None, a Selector() will be
constructed from the key. If a string, a
Selector() will be constructed from the string.
"""
# Convert value to a selector
if value is None:
value = Selector(key)
elif not callable(value):
value = Selector(value)
self.attrib[key] = value
def keys(self):
"""Return the attribute names."""
return self.attrib.keys()
def items(self):
"""Return the attribute names and values."""
return self.attrib.items()
def unwrap(self):
"""Unwraps a template to return a template element."""
# We are a template element
return self
def wrap(self):
"""Wraps a template element to return a template."""
# Wrap in a basic Template
return Template(self)
def apply(self, elem, obj):
"""Apply text and attributes to an etree.Element.
Applies the text and attribute instructions in the template
element to an etree.Element instance.
:param elem: An etree.Element instance.
:param obj: The base object associated with this template
element.
"""
# Start with the text...
if self.text is not None:
elem.text = six.text_type(self.text(obj))
# Now set up all the attributes...
for key, value in self.attrib.items():
try:
elem.set(key, six.text_type(value(obj, True)))
except KeyError:
# Attribute has no value, so don't include it
pass
def getAttrib(self, obj):
"""Get attribute."""
tmpattrib = {}
# Now set up all the attributes...
for key, value in self.attrib.items():
try:
tmpattrib[key] = value(obj)
except KeyError:
# Attribute has no value, so don't include it
pass
return tmpattrib
@staticmethod
def _splitTagName(name):
return _split_pattern.findall(name)
def _render(self, parent, datum, patches, nsmap):
"""Internal rendering.
Renders the template node into an etree.Element object.
Returns the etree.Element object.
:param parent: The parent etree.Element instance.
:param datum: The datum associated with this template element.
:param patches: A list of other template elements that must
also be applied.
:param nsmap: An optional namespace dictionary to be
associated with the etree.Element instance.
"""
# Allocate a node
if callable(self.tag):
tagname = self.tag(datum)
else:
tagname = self.tag
# If the datum is None
if datum is not None:
tmpattrib = self.getAttrib(datum)
else:
tmpattrib = {}
tagnameList = self._splitTagName(tagname)
insertIndex = 0
# If parent is not none and has same tagname
if parent is not None:
for i in range(0, len(tagnameList)):
tmpInsertPos = parent.find(tagnameList[i])
if tmpInsertPos is None:
break
elif parent.attrib != tmpattrib:
break
parent = tmpInsertPos
insertIndex = i + 1
if insertIndex >= len(tagnameList):
insertIndex = insertIndex - 1
# Create root elem
elem = etree.Element(tagnameList[insertIndex], nsmap=nsmap)
rootelem = elem
subelem = elem
# Create subelem
for i in range((insertIndex + 1), len(tagnameList)):
subelem = etree.SubElement(elem, tagnameList[i])
elem = subelem
# If we have a parent, append the node to the parent
if parent is not None:
# If we can merge this element, then insert
if insertIndex > 0:
parent.insert(len(list(parent)), rootelem)
else:
parent.append(rootelem)
# If the datum is None, do nothing else
if datum is None:
return rootelem
# Apply this template element to the element
self.apply(subelem, datum)
# Additionally, apply the patches
for patch in patches:
patch.apply(subelem, datum)
# We have fully rendered the element; return it
return rootelem
def render(self, parent, obj, patches=None, nsmap=None):
"""Render an object.
Renders an object against this template node. Returns a list
of two-item tuples, where the first item is an etree.Element
instance and the second item is the datum associated with that
instance.
:param parent: The parent for the etree.Element instances.
:param obj: The object to render this template element
against.
:param patches: A list of other template elements to apply
when rendering this template element.
:param nsmap: An optional namespace dictionary to attach to
the etree.Element instances.
"""
patches = patches or []
# First, get the datum we're rendering
data = None if obj is None else self.selector(obj)
# Check if we should render at all
if not self.will_render(data):
return []
elif data is None:
return [(self._render(parent, None, patches, nsmap), None)]
# Make the data into a list if it isn't already
if not isinstance(data, list):
data = [data]
elif parent is None:
raise ValueError(_('root element selecting a list'))
# Render all the elements
elems = []
for datum in data:
if self.subselector is not None:
datum = self.subselector(datum)
elems.append((self._render(parent, datum, patches, nsmap), datum))
# Return all the elements rendered, as well as the
# corresponding datum for the next step down the tree
return elems
def will_render(self, datum):
"""Hook method.
An overridable hook method to determine whether this template
element will be rendered at all. By default, returns False
(inhibiting rendering) if the datum is None.
:param datum: The datum associated with this template element.
"""
# Don't render if datum is None
return datum is not None
def _text_get(self):
"""Template element text.
Either None or a callable taking an object and optional
boolean do_raise indicator and returning the datum bound to
the text of the template element.
"""
return self._text
def _text_set(self, value):
# Convert value to a selector
if value is not None and not callable(value):
value = Selector(value)
self._text = value
def _text_del(self):
self._text = None
text = property(_text_get, _text_set, _text_del)
def tree(self):
"""Return string representation of the template tree.
Returns a representation of the template rooted at this
element as a string, suitable for inclusion in debug logs.
"""
# Build the inner contents of the tag...
contents = [self.tag, '!selector=%r' % self.selector]
# Add the text...
if self.text is not None:
contents.append('!text=%r' % self.text)
# Add all the other attributes
for key, value in self.attrib.items():
contents.append('%s=%r' % (key, value))
# If there are no children, return it as a closed tag
if len(self) == 0:
return '<%s/>' % ' '.join([str(i) for i in contents])
# OK, recurse to our children
children = [c.tree() for c in self]
# Return the result
return ('<%s>%s</%s>' %
(' '.join(contents), ''.join(children), self.tag))
def SubTemplateElement(parent, tag, attrib=None, selector=None,
subselector=None, **extra):
"""Create a template element as a child of another.
Corresponds to the etree.SubElement interface. Parameters are as
for TemplateElement, with the addition of the parent.
"""
# Convert attributes
attrib = attrib or {}
attrib.update(extra)
# Get a TemplateElement
elem = TemplateElement(tag, attrib=attrib, selector=selector,
subselector=subselector)
# Append the parent safely
if parent is not None:
parent.append(elem)
return elem
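# Illustrative sketch (not part of the original module): building and serializing a minimal
# template with the classes defined in this file. The element and attribute names are made
# up, and the expected output is only approximate.
#
#     root = TemplateElement('server', selector='server', id='id', name='name')
#     xml = Template(root).serialize({'server': {'id': '42', 'name': 'api-1'}})
#     # -> roughly: <?xml version='1.0' encoding='UTF-8'?><server id="42" name="api-1"/>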
class Template(object):
"""Represent a template."""
def __init__(self, root, nsmap=None):
"""Initialize a template.
:param root: The root element of the template.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
self.root = root.unwrap() if root is not None else None
self.nsmap = nsmap or {}
self.serialize_options = dict(encoding='UTF-8', xml_declaration=True)
def _serialize(self, parent, obj, siblings, nsmap=None):
"""Internal serialization.
Recursive routine to build a tree of etree.Element instances
from an object based on the template. Returns the first
etree.Element instance rendered, or None.
:param parent: The parent etree.Element instance. Can be
None.
:param obj: The object to render.
:param siblings: The TemplateElement instances against which
to render the object.
:param nsmap: An optional namespace dictionary to be
associated with the etree.Element instance
rendered.
"""
# First step, render the element
elems = siblings[0].render(parent, obj, siblings[1:], nsmap)
# Now, traverse all child elements
seen = set()
for idx, sibling in enumerate(siblings):
for child in sibling:
# Have we handled this child already?
if child.tag in seen:
continue
seen.add(child.tag)
# Determine the child's siblings
nieces = [child]
for sib in siblings[idx + 1:]:
if child.tag in sib:
nieces.append(sib[child.tag])
# Now call this function for all data elements recursively
for elem, datum in elems:
self._serialize(elem, datum, nieces)
# Return the first element; at the top level, this will be the
# root element
if elems:
return elems[0][0]
def serialize(self, obj, *args, **kwargs):
"""Serialize an object.
Serializes an object against the template. Returns a string
with the serialized XML. Positional and keyword arguments are
passed to etree.tostring().
:param obj: The object to serialize.
"""
elem = self.make_tree(obj)
if elem is None:
return ''
for k, v in self.serialize_options.items():
kwargs.setdefault(k, v)
# Serialize it into XML
return etree.tostring(elem, *args, **kwargs)
def make_tree(self, obj):
"""Create a tree.
Serializes an object against the template. Returns an Element
node with appropriate children.
:param obj: The object to serialize.
"""
# If the template is empty, return the empty string
if self.root is None:
return None
# Get the siblings and nsmap of the root element
siblings = self._siblings()
nsmap = self._nsmap()
# Form the element tree
return self._serialize(None, obj, siblings, nsmap)
def _siblings(self):
"""Hook method for computing root siblings.
An overridable hook method to return the siblings of the root
element. By default, this is the root element itself.
"""
return [self.root]
def _nsmap(self):
"""Hook method for computing the namespace dictionary.
An overridable hook method to return the namespace dictionary.
"""
return self.nsmap.copy()
def unwrap(self):
"""Unwraps a template to return a template element."""
# Return the root element
return self.root
def wrap(self):
"""Wraps a template element to return a template."""
# We are a template
return self
def apply(self, master):
"""Hook method for determining slave applicability.
An overridable hook method used to determine if this template
is applicable as a slave to a given master template.
:param master: The master template to test.
"""
return True
def tree(self):
"""Return string representation of the template tree.
Returns a representation of the template as a string, suitable
for inclusion in debug logs.
"""
return "%r: %s" % (self, self.root.tree())
class MasterTemplate(Template):
"""Represent a master template.
Master templates are versioned derivatives of templates that
additionally allow slave templates to be attached. Slave
templates allow modification of the serialized result without
directly changing the master.
"""
def __init__(self, root, version, nsmap=None):
"""Initialize a master template.
:param root: The root element of the template.
:param version: The version number of the template.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
super(MasterTemplate, self).__init__(root, nsmap)
self.version = version
self.slaves = []
def __repr__(self):
"""Return string representation of the template."""
return ("<%s.%s object version %s at %#x>" %
(self.__class__.__module__, self.__class__.__name__,
self.version, id(self)))
def _siblings(self):
"""Hook method for computing root siblings.
An overridable hook method to return the siblings of the root
element. This is the root element plus the root elements of
all the slave templates.
"""
return [self.root] + [slave.root for slave in self.slaves]
def _nsmap(self):
"""Hook method for computing the namespace dictionary.
An overridable hook method to return the namespace dictionary.
The namespace dictionary is computed by taking the master
template's namespace dictionary and updating it from all the
slave templates.
"""
nsmap = self.nsmap.copy()
for slave in self.slaves:
nsmap.update(slave._nsmap())
return nsmap
def attach(self, *slaves):
"""Attach one or more slave templates.
Attaches one or more slave templates to the master template.
Slave templates must have a root element with the same tag as
the master template. The slave template's apply() method will
be called to determine if the slave should be applied to this
master; if it returns False, that slave will be skipped.
(This allows filtering of slaves based on the version of the
master template.)
"""
slave_list = []
for slave in slaves:
slave = slave.wrap()
# Make sure we have a tree match
if slave.root.tag != self.root.tag:
msg = (_("Template tree mismatch; adding slave %(slavetag)s "
"to master %(mastertag)s") %
{'slavetag': slave.root.tag,
'mastertag': self.root.tag})
raise ValueError(msg)
# Make sure slave applies to this template
if not slave.apply(self):
continue
slave_list.append(slave)
# Add the slaves
self.slaves.extend(slave_list)
def copy(self):
"""Return a copy of this master template."""
# Return a copy of the MasterTemplate
tmp = self.__class__(self.root, self.version, self.nsmap)
tmp.slaves = self.slaves[:]
return tmp
class SlaveTemplate(Template):
"""Represent a slave template.
Slave templates are versioned derivatives of templates. Each
slave has a minimum version and optional maximum version of the
master template to which they can be attached.
"""
def __init__(self, root, min_vers, max_vers=None, nsmap=None):
"""Initialize a slave template.
:param root: The root element of the template.
:param min_vers: The minimum permissible version of the master
template for this slave template to apply.
:param max_vers: An optional upper bound for the master
template version.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
super(SlaveTemplate, self).__init__(root, nsmap)
self.min_vers = min_vers
self.max_vers = max_vers
def __repr__(self):
"""Return string representation of the template."""
return ("<%s.%s object versions %s-%s at %#x>" %
(self.__class__.__module__, self.__class__.__name__,
self.min_vers, self.max_vers, id(self)))
def apply(self, master):
"""Hook method for determining slave applicability.
An overridable hook method used to determine if this template
is applicable as a slave to a given master template. This
version requires the master template to have a version number
between min_vers and max_vers.
:param master: The master template to test.
"""
# Does the master meet our minimum version requirement?
if master.version < self.min_vers:
return False
# How about our maximum version requirement?
if self.max_vers is not None and master.version > self.max_vers:
return False
return True
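# --- Illustrative sketch (not part of the original module) -------------------
# Assumed usage of the version gate above; the 'image' tag, the attribute name
# and the version numbers are hypothetical examples. A slave is only attached
# when its [min_vers, max_vers] window accepts the master's version:
#
#     master = MasterTemplate(TemplateElement('image', selector='image'), 2)
#     slave_root = TemplateElement('image')
#     slave_root.set('extra_attr')
#     master.attach(SlaveTemplate(slave_root, 2))   # applied: 2 >= 2
#     master.attach(SlaveTemplate(slave_root, 3))   # skipped: 2 < 3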
class TemplateBuilder(object):
"""Template builder.
This class exists to allow templates to be lazily built without
having to build them each time they are needed. It must be
subclassed, and the subclass must implement the construct()
method, which must return a Template (or subclass) instance. The
constructor will always return the template returned by
construct(), or, if it has a copy() method, a copy of that
template.
"""
_tmpl = None
def __new__(cls, copy=True):
"""Construct and return a template.
:param copy: If True (the default), a copy of the template
will be constructed and returned, if possible.
"""
# Do we need to construct the template?
if cls._tmpl is None:
tmp = super(TemplateBuilder, cls).__new__(cls)
# Construct the template
cls._tmpl = tmp.construct()
# If the template has a copy attribute, return the result of
# calling it
if copy and hasattr(cls._tmpl, 'copy'):
return cls._tmpl.copy()
# Return the template
return cls._tmpl
def construct(self):
"""Construct a template.
Called to construct a template instance, which it must return.
Only called once.
"""
raise NotImplementedError(_("subclasses must implement construct()!"))
def make_links(parent, selector=None):
"""Attach an Atom <links> element to the parent."""
elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM,
selector=selector)
elem.set('rel')
elem.set('type')
elem.set('href')
# Just for completeness...
return elem
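# Illustrative note (assumption): make_links is typically hung off a parent
# element whose selected object exposes a 'links' list of dicts carrying
# 'rel', 'type' and 'href' keys, e.g. make_links(server_elem, 'links').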
def make_flat_dict(name, selector=None, subselector=None, ns=None):
"""Utility for simple XML templates.
Simple templates are templates that traditionally used
XMLDictSerializer with no metadata.
Returns a template element where the top-level element has the
given tag name, and where sub-elements have tag names derived
from the object's keys and text derived from the object's values.
This only works for flat dictionary objects, not dictionaries
containing nested lists or dictionaries.
"""
# Set up the names we need...
if ns is None:
elemname = name
tagname = Selector(0)
else:
elemname = '{%s}%s' % (ns, name)
tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0])
if selector is None:
selector = name
# Build the root element
root = TemplateElement(elemname, selector=selector,
subselector=subselector)
# Build an element to represent all the keys and values
elem = SubTemplateElement(root, tagname, selector=get_items)
elem.text = 1
# Return the template
return root
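# --- Illustrative sketch (not part of the original module) -------------------
# Assumed usage of make_flat_dict; the 'metadata' tag and sample payload are
# hypothetical. Each key of the selected flat dict becomes a child element
# whose text is the corresponding value:
#
#     tmpl = MasterTemplate(make_flat_dict('metadata'), 1)
#     tmpl.serialize({'metadata': {'key1': 'value1'}})
#     # roughly: <?xml ...?><metadata><key1>value1</key1></metadata>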
| avg_line_length: 31.371517 | max_line_length: 78 | alphanum_fraction: 0.593605 |

| hexsha: a83cd8246e345ca8860b4a62c06df0f0a1b8fdf9 | size: 12,501 | ext: py | lang: Python |
| max_stars_repo: tests/unit/modules/test_cmci_get.py @ vera-chan/ibm_zos_cics (head 727867b121ff6d6e0dfd01488d7a588597208e2b) | licenses: ["Apache-2.0"] | stars: null |
| max_issues_repo: tests/unit/modules/test_cmci_get.py @ vera-chan/ibm_zos_cics (head 727867b121ff6d6e0dfd01488d7a588597208e2b) | licenses: ["Apache-2.0"] | issues: null |
| max_forks_repo: tests/unit/modules/test_cmci_get.py @ vera-chan/ibm_zos_cics (head 727867b121ff6d6e0dfd01488d7a588597208e2b) | licenses: ["Apache-2.0"] | forks: null |
# -*- coding: utf-8 -*-
# Copyright (c) IBM Corporation 2020
# Apache License, Version 2.0 (see https://opensource.org/licenses/Apache-2.0)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.ibm.ibm_zos_cics.plugins.modules import cmci_get
from ansible_collections.ibm.ibm_zos_cics.tests.unit.helpers.cmci_helper import (
HOST, PORT, CONTEXT, SCOPE, AnsibleFailJson,
set_module_args, exit_json, fail_json, cmci_module, CMCITestHelper
)
from ansible.module_utils import basic
import pytest
import re
def test_401_fails(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.stub_request(
'GET',
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/CICSDefinitionBundle/CICPY012/',
status_code=401,
reason='Not authorized',
text='<!doctype html public "-//IETF//DTD HTML 2.0//EN">\n'
'<html>'
'<head>'
'<title>CICS Web Interface error</title>'
'</head>'
'<body>'
'<H1>401 Basic Authentication Error</H1>'
'</body>'
'</html>',
headers={
'CONTENT-TYPE': 'text/html'
})
cmci_module.expect({
'msg': 'CMCI request returned non-OK status: Not authorized',
'changed': False,
'failed': True,
'http_status': 'Not authorized',
'http_status_code': 401,
'request': {
'url': 'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/'
'cicsdefinitionbundle/CICPY012/',
'method': 'GET',
'body': None
}
})
# Module config
cmci_module.run(cmci_get, {
'cmci_host': 'winmvs2c.hursley.ibm.com',
'cmci_port': '26040',
'context': 'CICPY012',
'security_type': 'none',
'type': 'CICSDefinitionBundle'
})
def test_invalid_host(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.expect({
'msg': 'Parameter "cmci_host" with value "^*.99.99.199" was not valid. Expected an IP address or host name.',
'changed': False,
'failed': True
})
cmci_module.run(cmci_get, {
'cmci_host': '^*.99.99.199',
'cmci_port': '10080',
'context': 'iyk3z0r9',
'scope': 'iyk3z0r8',
'type': 'cicslocalfile'
})
def test_unknown_host(monkeypatch):
monkeypatch.setattr(basic.AnsibleModule, "exit_json", exit_json)
monkeypatch.setattr(basic.AnsibleModule, "fail_json", fail_json)
set_module_args({
'cmci_host': 'invalid.hursley.ibm.com',
'cmci_port': '26040',
'context': 'CICPY012',
'security_type': 'none',
'type': 'CICSDefinitionBundle'
})
with pytest.raises(AnsibleFailJson) as exc_info:
cmci_get.main()
exp = \
'Error performing CMCI request: <[^>]*>: Failed to establish a new connection: ' \
'\\[Errno 8\\] nodename nor servname provided, or not known'
assert re.match(exp, exc_info.value.args[0]['msg'])
def test_invalid_port_type(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.expect({
'msg': "argument cmci_port is of type <class 'str'> and we were unable to "
"convert to int: invalid literal for int() with base 10: '^%^080'",
'failed': True
})
cmci_module.run(cmci_get, {
'cmci_host': '100.99.99.199',
'cmci_port': '^%^080',
'context': 'iyk3z0r9',
'scope': 'iyk3z0r8',
'type': 'cicslocalfile'
})
def test_invalid_port_low(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.expect({
'msg': 'Parameter "cmci_port" with value "-1" was not valid. Expected a port number 0-65535.',
'changed': False,
'failed': True
})
cmci_module.run(cmci_get, {
'cmci_host': '100.99.99.199',
'cmci_port': -1,
'context': 'iyk3z0r9',
'scope': 'iyk3z0r8',
'type': 'cicslocalfile'
})
def test_invalid_port_high(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.expect({
'msg': 'Parameter "cmci_port" with value "65536" was not valid. Expected a port number 0-65535.',
'changed': False,
'failed': True
})
cmci_module.run(cmci_get, {
'cmci_host': '100.99.99.199',
'cmci_port': 65536,
'context': 'iyk3z0r9',
'scope': 'iyk3z0r8',
'type': 'cicslocalfile'
})
def test_invalid_context(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.expect({
'msg': 'Parameter "context" with value "^&iyk3z0r9" was not valid. Expected a CPSM context name. CPSM '
'context names are max 8 characters. Valid characters are A-Z a-z 0-9.',
'changed': False,
'failed': True
})
cmci_module.run(cmci_get, {
'cmci_host': '100.99.99.199',
'cmci_port': '10080',
'context': '^&iyk3z0r9',
'scope': 'iyk3z0r8',
'type': 'cicslocalfile'
})
def test_invalid_scope(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.expect({
'msg': 'Parameter "scope" with value "&^iyk3z0r8" was not valid. Expected a CPSM scope name. CPSM scope '
'names are max 8 characters. Valid characters are A-Z a-z 0-9.',
'changed': False,
'failed': True
})
cmci_module.run(cmci_get, {
'cmci_host': '100.99.99.199',
'cmci_port': '10080',
'context': 'iyk3z0r9',
'scope': '&^iyk3z0r8',
'type': 'cicslocalfile'
})
def test_invalid_security(cmci_module): # type: (CMCITestHelper) -> None
cmci_module.expect({
'msg': 'value of security_type must be one of: none, basic, certificate, got: yes',
'failed': True
})
cmci_module.run(cmci_get, {
'cmci_host': '100.99.99.199',
'cmci_port': '10080',
'context': 'iyk3z0r9',
'scope': 'iyk3z0r8',
'security_type': 'yes',
'type': 'cicslocalfile'
})
def test_auth(cmci_module): # type: (CMCITestHelper) -> None
records = [
{'name': 'bat', 'dsname': 'STEWF.BLOP.BLIP'},
{'name': 'bing', 'dsname': 'STEWF.BAT.BAZ'}
]
cmci_module.stub_records(
'GET',
'cicslocalfile',
records,
scope=SCOPE,
request_headers={
'authorization': 'Basic Zm9vOmJhcg=='
},
scheme='https'
)
cmci_module.expect(result(
'https://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/cicslocalfile/CICSEX56/IYCWEMW2',
records=records
))
cmci_module.run(cmci_get, {
'cmci_host': HOST,
'cmci_port': PORT,
'cmci_user': 'foo',
'cmci_password': 'bar',
'security_type': 'basic',
'context': CONTEXT,
'scope': SCOPE,
'type': 'cicslocalfile'
})
def test_ok_context_scope(cmci_module): # type: (CMCITestHelper) -> None
records = [
{'name': 'bat', 'dsname': 'STEWF.BLOP.BLIP'},
{'name': 'bing', 'dsname': 'STEWF.BAT.BAZ'}
]
cmci_module.stub_records('GET', 'cicslocalfile', records, scope=SCOPE)
cmci_module.expect(result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/cicslocalfile/CICSEX56/IYCWEMW2',
records=records
))
cmci_module.run(cmci_get, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'scope': 'IYCWEMW2',
'type': 'cicslocalfile'
})
def test_ok_context_scope_single_record(cmci_module): # type: (CMCITestHelper) -> None
records = [{'name': 'bat', 'dsname': 'STEWF.BLOP.BLIP'}]
cmci_module.stub_records('GET', 'cicslocalfile', records, scope=SCOPE)
cmci_module.expect(result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/cicslocalfile/CICSEX56/IYCWEMW2',
records=records
))
cmci_module.run(cmci_get, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'scope': 'IYCWEMW2',
'type': 'cicslocalfile'
})
def test_ok_context_scope_jvmserver_header(cmci_module): # type: (CMCITestHelper) -> None
records = [
{'name': 'bat', 'dsname': 'STEWF.BLOP.BLIP'},
{'name': 'bing', 'dsname': 'STEWF.BAT.BAZ'}
]
cmci_module.stub_records(
'GET',
'cicslocalfile',
records,
scope=SCOPE,
headers={
# JVM server returns a content type with the charset embedded
'Content-Type': 'application/xml;charset=UTF-8'
}
)
cmci_module.expect(result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/cicslocalfile/CICSEX56/IYCWEMW2',
records=records
))
cmci_module.run(cmci_get, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'scope': 'IYCWEMW2',
'type': 'cicslocalfile'
})
def test_query_criteria(cmci_module): # type: (CMCITestHelper) -> None
records = [{'name': 'bat', 'dsname': 'STEWF.BLOP.BLIP'}]
cmci_module.stub_records('GET', 'cicslocalfile', records, scope=SCOPE, parameters='?CRITERIA=%28FOO%3D%27BAR%27%29')
cmci_module.expect(result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/'
'cicslocalfile/CICSEX56/IYCWEMW2?CRITERIA=%28FOO%3D%27BAR%27%29',
records=records
))
cmci_module.run(cmci_get, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'scope': 'IYCWEMW2',
'type': 'cicslocalfile',
'resources': {
'filter': {
'FOO': 'BAR'
}
}
})
def test_query_parameter(cmci_module): # type: (CMCITestHelper) -> None
records = [{'name': 'bat', 'dsname': 'STEWF.BLOP.BLIP'}]
cmci_module.stub_records(
'GET',
'cicsdefinitionfile',
records,
scope=SCOPE,
parameters='?PARAMETER=CSDGROUP%28%2A%29'
)
cmci_module.expect(result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/'
'cicsdefinitionfile/CICSEX56/IYCWEMW2?PARAMETER=CSDGROUP%28%2A%29',
records=records
))
cmci_module.run(cmci_get, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'scope': 'IYCWEMW2',
'type': 'cicsdefinitionfile',
'resources': {
'parameter': 'CSDGROUP(*)'
}
})
def test_query_parameter_criteria(cmci_module): # type: (CMCITestHelper) -> None
records = [{'name': 'bat', 'dsname': 'STEWF.BLOP.BLIP'}]
cmci_module.stub_records(
'GET',
'cicsdefinitionfile',
records,
scope=SCOPE,
parameters='?CRITERIA=%28FOO%3D%27BAR%27%29&PARAMETER=CSDGROUP%28%2A%29'
)
cmci_module.expect(result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/'
'cicsdefinitionfile/CICSEX56/IYCWEMW2?CRITERIA=%28FOO%3D%27BAR%27%29&PARAMETER=CSDGROUP%28%2A%29',
records=records
))
cmci_module.run(cmci_get, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'scope': 'IYCWEMW2',
'type': 'cicsdefinitionfile',
'resources': {
'parameter': 'CSDGROUP(*)',
'filter': {
'FOO': 'BAR'
}
}
})
def test_ok_context_record_count(cmci_module): # type: (CMCITestHelper) -> None
records = [{'name': 'bat', 'dsname': 'STEWF.BLOP.BLIP'}]
cmci_module.stub_records('GET', 'cicslocalfile', records, record_count=1)
cmci_module.expect(result(
'http://winmvs2c.hursley.ibm.com:26040/CICSSystemManagement/cicslocalfile/CICSEX56///1',
records=records
))
cmci_module.run(cmci_get, {
'cmci_host': HOST,
'cmci_port': PORT,
'context': CONTEXT,
'record_count': 1,
'type': 'cicslocalfile'
})
def result(url, records, http_status='OK', http_status_code=200):
return {
'changed': False,
'connect_version': '0560',
'cpsm_reason': '',
'cpsm_reason_code': 0,
'cpsm_response': 'OK',
'cpsm_response_code': 1024,
'http_status': http_status,
'http_status_code': http_status_code,
'record_count': len(records),
'records': records,
'request': {
'url': url,
'method': 'GET',
'body': None
}
}
| avg_line_length: 29.623223 | max_line_length: 121 | alphanum_fraction: 0.584513 |

| hexsha: 146eae829642956c5318bdbc5073f1cfd4e06145 | size: 26,399 | ext: py | lang: Python |
| max_stars_repo: deepdiff/delta.py @ dtorres-sf/deepdiff (head 1dd0aecbc8307842e54ce429ddb254ef4bc97724) | licenses: ["MIT"] | stars: 1,349 (2015-01-15T20:25:28.000Z to 2022-03-30T11:58:03.000Z) |
| max_issues_repo: deepdiff/delta.py @ dtorres-sf/deepdiff (head 1dd0aecbc8307842e54ce429ddb254ef4bc97724) | licenses: ["MIT"] | issues: 270 (2015-02-25T00:58:09.000Z to 2022-03-31T18:02:17.000Z) |
| max_forks_repo: deepdiff/delta.py @ dtorres-sf/deepdiff (head 1dd0aecbc8307842e54ce429ddb254ef4bc97724) | licenses: ["MIT"] | forks: 187 (2015-02-24T18:21:07.000Z to 2022-03-06T18:38:20.000Z) |
import logging
from collections.abc import Mapping
from copy import deepcopy
from deepdiff import DeepDiff
from deepdiff.serialization import pickle_load, pickle_dump
from deepdiff.helper import (
strings, short_repr, numbers,
np_ndarray, np_array_factory, numpy_dtypes, get_doc,
not_found, numpy_dtype_string_to_type, dict_)
from deepdiff.path import _path_to_elements, _get_nested_obj, GET, GETATTR
from deepdiff.anyset import AnySet
logger = logging.getLogger(__name__)
VERIFICATION_MSG = 'Expected the old value for {} to be {} but it is {}. Error found on: {}'
ELEM_NOT_FOUND_TO_ADD_MSG = 'Key or index of {} is not found for {} for setting operation.'
TYPE_CHANGE_FAIL_MSG = 'Unable to do the type change for {} from to type {} due to {}'
VERIFY_SYMMETRY_MSG = ('while checking the symmetry of the delta. You have applied the delta to an object that has '
'different values than the original object the delta was made from')
FAIL_TO_REMOVE_ITEM_IGNORE_ORDER_MSG = 'Failed to remove index[{}] on {}. It was expected to be {} but got {}'
DELTA_NUMPY_OPERATOR_OVERRIDE_MSG = (
'A numpy ndarray is most likely being added to a delta. '
'Due to Numpy override the + operator, you can only do: delta + ndarray '
'and NOT ndarray + delta')
BINIARY_MODE_NEEDED_MSG = "Please open the file in the binary mode and pass to Delta by passing 'b' in open(..., 'b'): {}"
DELTA_AT_LEAST_ONE_ARG_NEEDED = 'At least one of the diff, delta_path or delta_file arguments need to be passed.'
INVALID_ACTION_WHEN_CALLING_GET_ELEM = 'invalid action of {} when calling _get_elem_and_compare_to_old_value'
INVALID_ACTION_WHEN_CALLING_SIMPLE_SET_ELEM = 'invalid action of {} when calling _simple_set_elem_value'
INVALID_ACTION_WHEN_CALLING_SIMPLE_DELETE_ELEM = 'invalid action of {} when calling _simple_set_elem_value'
UNABLE_TO_GET_ITEM_MSG = 'Unable to get the item at {}: {}'
UNABLE_TO_GET_PATH_MSG = 'Unable to get the item at {}'
INDEXES_NOT_FOUND_WHEN_IGNORE_ORDER = 'Delta added to an incompatible object. Unable to add the following items at the specific indexes. {}'
NUMPY_TO_LIST = 'NUMPY_TO_LIST'
NOT_VALID_NUMPY_TYPE = "{} is not a valid numpy type."
doc = get_doc('delta.rst')
class DeltaError(ValueError):
"""
Delta specific errors
"""
pass
class DeltaNumpyOperatorOverrideError(ValueError):
"""
Delta Numpy Operator Override Error
"""
pass
class Delta:
__doc__ = doc
def __init__(
self,
diff=None,
delta_path=None,
delta_file=None,
deserializer=pickle_load,
log_errors=True,
mutate=False,
raise_errors=False,
safe_to_import=None,
serializer=pickle_dump,
verify_symmetry=False,
):
if 'safe_to_import' not in set(deserializer.__code__.co_varnames):
def _deserializer(obj, safe_to_import=None):
return deserializer(obj)
else:
_deserializer = deserializer
if diff is not None:
if isinstance(diff, DeepDiff):
self.diff = diff._to_delta_dict(directed=not verify_symmetry)
elif isinstance(diff, Mapping):
self.diff = diff
elif isinstance(diff, strings):
self.diff = _deserializer(diff, safe_to_import=safe_to_import)
elif delta_path:
with open(delta_path, 'rb') as the_file:
content = the_file.read()
self.diff = _deserializer(content, safe_to_import=safe_to_import)
elif delta_file:
try:
content = delta_file.read()
except UnicodeDecodeError as e:
raise ValueError(BINIARY_MODE_NEEDED_MSG.format(e)) from None
self.diff = _deserializer(content, safe_to_import=safe_to_import)
else:
raise ValueError(DELTA_AT_LEAST_ONE_ARG_NEEDED)
self.mutate = mutate
self.verify_symmetry = verify_symmetry
self.raise_errors = raise_errors
self.log_errors = log_errors
self._numpy_paths = self.diff.pop('_numpy_paths', False)
self.serializer = serializer
self.deserializer = deserializer
self.reset()
def __repr__(self):
return "<Delta: {}>".format(short_repr(self.diff, max_length=100))
def reset(self):
self.post_process_paths_to_convert = dict_()
def __add__(self, other):
if isinstance(other, numbers) and self._numpy_paths:
raise DeltaNumpyOperatorOverrideError(DELTA_NUMPY_OPERATOR_OVERRIDE_MSG)
if self.mutate:
self.root = other
else:
self.root = deepcopy(other)
self._do_pre_process()
self._do_values_changed()
self._do_set_item_added()
self._do_set_item_removed()
self._do_type_changes()
# NOTE: the remove iterable action needs to happen BEFORE
# all the other iterables to match the reverse of order of operations in DeepDiff
self._do_iterable_item_removed()
self._do_iterable_item_added()
self._do_ignore_order()
self._do_dictionary_item_added()
self._do_dictionary_item_removed()
self._do_attribute_added()
self._do_attribute_removed()
self._do_post_process()
other = self.root
# removing the reference to other
del self.root
self.reset()
return other
__radd__ = __add__
def _raise_or_log(self, msg, level='error'):
if self.log_errors:
getattr(logger, level)(msg)
if self.raise_errors:
raise DeltaError(msg)
def _do_verify_changes(self, path, expected_old_value, current_old_value):
if self.verify_symmetry and expected_old_value != current_old_value:
self._raise_or_log(VERIFICATION_MSG.format(
path, expected_old_value, current_old_value, VERIFY_SYMMETRY_MSG))
def _get_elem_and_compare_to_old_value(self, obj, path_for_err_reporting, expected_old_value, elem=None, action=None):
try:
if action == GET:
current_old_value = obj[elem]
elif action == GETATTR:
current_old_value = getattr(obj, elem)
else:
raise DeltaError(INVALID_ACTION_WHEN_CALLING_GET_ELEM.format(action))
except (KeyError, IndexError, AttributeError, TypeError) as e:
current_old_value = not_found
if isinstance(path_for_err_reporting, (list, tuple)):
path_for_err_reporting = '.'.join([i[0] for i in path_for_err_reporting])
if self.verify_symmetry:
self._raise_or_log(VERIFICATION_MSG.format(
path_for_err_reporting,
expected_old_value, current_old_value, e))
else:
self._raise_or_log(UNABLE_TO_GET_PATH_MSG.format(
path_for_err_reporting))
return current_old_value
def _simple_set_elem_value(self, obj, path_for_err_reporting, elem=None, value=None, action=None):
"""
Set the element value directly on an object
"""
try:
if action == GET:
try:
obj[elem] = value
except IndexError:
if elem == len(obj):
obj.append(value)
else:
self._raise_or_log(ELEM_NOT_FOUND_TO_ADD_MSG.format(elem, path_for_err_reporting))
elif action == GETATTR:
setattr(obj, elem, value)
else:
raise DeltaError(INVALID_ACTION_WHEN_CALLING_SIMPLE_SET_ELEM.format(action))
except (KeyError, IndexError, AttributeError, TypeError) as e:
self._raise_or_log('Failed to set {} due to {}'.format(path_for_err_reporting, e))
def _coerce_obj(self, parent, obj, path, parent_to_obj_elem,
parent_to_obj_action, elements, to_type, from_type):
"""
Coerce obj and mark it in post_process_paths_to_convert for later to be converted back.
Also reassign it to its parent to replace the old object.
"""
self.post_process_paths_to_convert[elements[:-1]] = {'old_type': to_type, 'new_type': from_type}
# If this function is going to ever be used to convert numpy arrays, uncomment these lines:
# if from_type is np_ndarray:
# obj = obj.tolist()
# else:
obj = to_type(obj)
if parent:
# Making sure that the object is re-instated inside the parent especially if it was immutable
# and we had to turn it into a mutable one. In such cases the object has a new id.
self._simple_set_elem_value(obj=parent, path_for_err_reporting=path, elem=parent_to_obj_elem,
value=obj, action=parent_to_obj_action)
return obj
def _set_new_value(self, parent, parent_to_obj_elem, parent_to_obj_action,
obj, elements, path, elem, action, new_value):
"""
Set the element value on an object and if necessary convert the object to the proper mutable type
"""
if isinstance(obj, tuple):
# convert this object back to a tuple later
obj = self._coerce_obj(
parent, obj, path, parent_to_obj_elem,
parent_to_obj_action, elements,
to_type=list, from_type=tuple)
self._simple_set_elem_value(obj=obj, path_for_err_reporting=path, elem=elem,
value=new_value, action=action)
def _simple_delete_elem(self, obj, path_for_err_reporting, elem=None, action=None):
"""
Delete the element directly on an object
"""
try:
if action == GET:
del obj[elem]
elif action == GETATTR:
del obj.__dict__[elem]
else:
raise DeltaError(INVALID_ACTION_WHEN_CALLING_SIMPLE_DELETE_ELEM.format(action))
except (KeyError, IndexError, AttributeError) as e:
self._raise_or_log('Failed to set {} due to {}'.format(path_for_err_reporting, e))
def _del_elem(self, parent, parent_to_obj_elem, parent_to_obj_action,
obj, elements, path, elem, action):
"""
Delete the element value on an object and if necessary convert the object to the proper mutable type
"""
obj_is_new = False
if isinstance(obj, tuple):
# convert this object back to a tuple later
self.post_process_paths_to_convert[elements[:-1]] = {'old_type': list, 'new_type': tuple}
obj = list(obj)
obj_is_new = True
self._simple_delete_elem(obj=obj, path_for_err_reporting=path, elem=elem, action=action)
if obj_is_new and parent:
# Making sure that the object is re-instated inside the parent especially if it was immutable
# and we had to turn it into a mutable one. In such cases the object has a new id.
self._simple_set_elem_value(obj=parent, path_for_err_reporting=path, elem=parent_to_obj_elem,
value=obj, action=parent_to_obj_action)
def _do_iterable_item_added(self):
iterable_item_added = self.diff.get('iterable_item_added', {})
iterable_item_moved = self.diff.get('iterable_item_moved')
if iterable_item_moved:
added_dict = {v["new_path"]: v["value"] for k, v in iterable_item_moved.items()}
iterable_item_added.update(added_dict)
if iterable_item_added:
self._do_item_added(iterable_item_added, insert=True)
def _do_dictionary_item_added(self):
dictionary_item_added = self.diff.get('dictionary_item_added')
if dictionary_item_added:
self._do_item_added(dictionary_item_added, sort=False)
def _do_attribute_added(self):
attribute_added = self.diff.get('attribute_added')
if attribute_added:
self._do_item_added(attribute_added)
def _do_item_added(self, items, sort=True, insert=False):
if sort:
# sorting items by their path so that the items with smaller index
# are applied first (unless `sort` is `False` so that order of
# added items is retained, e.g. for dicts).
items = sorted(items.items(), key=lambda x: x[0])
else:
items = items.items()
for path, new_value in items:
elem_and_details = self._get_elements_and_details(path)
if elem_and_details:
elements, parent, parent_to_obj_elem, parent_to_obj_action, obj, elem, action = elem_and_details
else:
continue # pragma: no cover. Due to cPython peephole optimizer, this line doesn't get covered. https://github.com/nedbat/coveragepy/issues/198
# Insert is only true for iterables, make sure it is a valid index.
if(insert and elem < len(obj)):
obj.insert(elem, None)
self._set_new_value(parent, parent_to_obj_elem, parent_to_obj_action,
obj, elements, path, elem, action, new_value)
def _do_values_changed(self):
values_changed = self.diff.get('values_changed')
if values_changed:
self._do_values_or_type_changed(values_changed)
def _do_type_changes(self):
type_changes = self.diff.get('type_changes')
if type_changes:
self._do_values_or_type_changed(type_changes, is_type_change=True)
def _do_post_process(self):
if self.post_process_paths_to_convert:
self._do_values_or_type_changed(self.post_process_paths_to_convert, is_type_change=True)
def _do_pre_process(self):
if self._numpy_paths and ('iterable_item_added' in self.diff or 'iterable_item_removed' in self.diff):
preprocess_paths = dict_()
for path, type_ in self._numpy_paths.items():
preprocess_paths[path] = {'old_type': np_ndarray, 'new_type': list}
try:
type_ = numpy_dtype_string_to_type(type_)
except Exception as e:
self._raise_or_log(NOT_VALID_NUMPY_TYPE.format(e))
continue # pragma: no cover. Due to cPython peephole optimizer, this line doesn't get covered. https://github.com/nedbat/coveragepy/issues/198
self.post_process_paths_to_convert[path] = {'old_type': list, 'new_type': type_}
if preprocess_paths:
self._do_values_or_type_changed(preprocess_paths, is_type_change=True)
def _get_elements_and_details(self, path):
try:
elements = _path_to_elements(path)
if len(elements) > 1:
parent = _get_nested_obj(obj=self, elements=elements[:-2])
parent_to_obj_elem, parent_to_obj_action = elements[-2]
obj = self._get_elem_and_compare_to_old_value(
obj=parent, path_for_err_reporting=path, expected_old_value=None,
elem=parent_to_obj_elem, action=parent_to_obj_action)
else:
parent = parent_to_obj_elem = parent_to_obj_action = None
obj = _get_nested_obj(obj=self, elements=elements[:-1])
elem, action = elements[-1]
except Exception as e:
self._raise_or_log(UNABLE_TO_GET_ITEM_MSG.format(path, e))
return None
else:
if obj is not_found:
return None
return elements, parent, parent_to_obj_elem, parent_to_obj_action, obj, elem, action
def _do_values_or_type_changed(self, changes, is_type_change=False):
for path, value in changes.items():
elem_and_details = self._get_elements_and_details(path)
if elem_and_details:
elements, parent, parent_to_obj_elem, parent_to_obj_action, obj, elem, action = elem_and_details
else:
continue # pragma: no cover. Due to cPython peephole optimizer, this line doesn't get covered. https://github.com/nedbat/coveragepy/issues/198
expected_old_value = value.get('old_value', not_found)
current_old_value = self._get_elem_and_compare_to_old_value(
obj=obj, path_for_err_reporting=path, expected_old_value=expected_old_value, elem=elem, action=action)
if current_old_value is not_found:
continue # pragma: no cover. I have not been able to write a test for this case. But we should still check for it.
# With type change if we could have originally converted the type from old_value
# to new_value just by applying the class of the new_value, then we might not include the new_value
# in the delta dictionary.
if is_type_change and 'new_value' not in value:
try:
new_type = value['new_type']
# in case of Numpy we pass the ndarray plus the dtype in a tuple
if new_type in numpy_dtypes:
new_value = np_array_factory(current_old_value, new_type)
else:
new_value = new_type(current_old_value)
except Exception as e:
self._raise_or_log(TYPE_CHANGE_FAIL_MSG.format(obj[elem], value.get('new_type', 'unknown'), e))
continue
else:
new_value = value['new_value']
self._set_new_value(parent, parent_to_obj_elem, parent_to_obj_action,
obj, elements, path, elem, action, new_value)
self._do_verify_changes(path, expected_old_value, current_old_value)
def _do_item_removed(self, items):
"""
Handle removing items.
"""
# Sorting the iterable_item_removed in reverse order based on the paths.
# So that we delete a bigger index before a smaller index
for path, expected_old_value in sorted(items.items(), key=lambda x: x[0], reverse=True):
elem_and_details = self._get_elements_and_details(path)
if elem_and_details:
elements, parent, parent_to_obj_elem, parent_to_obj_action, obj, elem, action = elem_and_details
else:
continue # pragma: no cover. Due to cPython peephole optimizer, this line doesn't get covered. https://github.com/nedbat/coveragepy/issues/198
current_old_value = self._get_elem_and_compare_to_old_value(
obj=obj, elem=elem, path_for_err_reporting=path, expected_old_value=expected_old_value, action=action)
if current_old_value is not_found:
continue
self._del_elem(parent, parent_to_obj_elem, parent_to_obj_action,
obj, elements, path, elem, action)
self._do_verify_changes(path, expected_old_value, current_old_value)
def _do_iterable_item_removed(self):
iterable_item_removed = self.diff.get('iterable_item_removed', {})
iterable_item_moved = self.diff.get('iterable_item_moved')
if iterable_item_moved:
# These will get added back during items_added
removed_dict = {k: v["value"] for k, v in iterable_item_moved.items()}
iterable_item_removed.update(removed_dict)
if iterable_item_removed:
self._do_item_removed(iterable_item_removed)
def _do_dictionary_item_removed(self):
dictionary_item_removed = self.diff.get('dictionary_item_removed')
if dictionary_item_removed:
self._do_item_removed(dictionary_item_removed)
def _do_attribute_removed(self):
attribute_removed = self.diff.get('attribute_removed')
if attribute_removed:
self._do_item_removed(attribute_removed)
def _do_set_item_added(self):
items = self.diff.get('set_item_added')
if items:
self._do_set_or_frozenset_item(items, func='union')
def _do_set_item_removed(self):
items = self.diff.get('set_item_removed')
if items:
self._do_set_or_frozenset_item(items, func='difference')
def _do_set_or_frozenset_item(self, items, func):
for path, value in items.items():
elements = _path_to_elements(path)
parent = _get_nested_obj(obj=self, elements=elements[:-1])
elem, action = elements[-1]
obj = self._get_elem_and_compare_to_old_value(
parent, path_for_err_reporting=path, expected_old_value=None, elem=elem, action=action)
new_value = getattr(obj, func)(value)
self._simple_set_elem_value(parent, path_for_err_reporting=path, elem=elem, value=new_value, action=action)
def _do_ignore_order_get_old(self, obj, remove_indexes_per_path, fixed_indexes_values, path_for_err_reporting):
"""
A generator that gets the old values in an iterable when the order was supposed to be ignored.
"""
old_obj_index = -1
max_len = len(obj) - 1
while old_obj_index < max_len:
old_obj_index += 1
current_old_obj = obj[old_obj_index]
if current_old_obj in fixed_indexes_values:
continue
if old_obj_index in remove_indexes_per_path:
expected_obj_to_delete = remove_indexes_per_path.pop(old_obj_index)
if current_old_obj == expected_obj_to_delete:
continue
else:
self._raise_or_log(FAIL_TO_REMOVE_ITEM_IGNORE_ORDER_MSG.format(
old_obj_index, path_for_err_reporting, expected_obj_to_delete, current_old_obj))
yield current_old_obj
def _do_ignore_order(self):
"""
't1': [5, 1, 1, 1, 6],
't2': [7, 1, 1, 1, 8],
'iterable_items_added_at_indexes': {
'root': {
0: 7,
4: 8
}
},
'iterable_items_removed_at_indexes': {
'root': {
4: 6,
0: 5
}
}
"""
fixed_indexes = self.diff.get('iterable_items_added_at_indexes', dict_())
remove_indexes = self.diff.get('iterable_items_removed_at_indexes', dict_())
paths = set(fixed_indexes.keys()) | set(remove_indexes.keys())
for path in paths:
# In the case of ignore_order reports, we are pointing to the container object.
# Thus we add a [0] to the elements so we can get the required objects and discard what we don't need.
elem_and_details = self._get_elements_and_details("{}[0]".format(path))
if elem_and_details:
_, parent, parent_to_obj_elem, parent_to_obj_action, obj, _, _ = elem_and_details
else:
continue # pragma: no cover. Due to cPython peephole optimizer, this line doesn't get covered. https://github.com/nedbat/coveragepy/issues/198
# copying both these dictionaries since we don't want to mutate them.
fixed_indexes_per_path = fixed_indexes.get(path, dict_()).copy()
remove_indexes_per_path = remove_indexes.get(path, dict_()).copy()
fixed_indexes_values = AnySet(fixed_indexes_per_path.values())
new_obj = []
# Numpy's NdArray does not like the bool function.
if isinstance(obj, np_ndarray):
there_are_old_items = obj.size > 0
else:
there_are_old_items = bool(obj)
old_item_gen = self._do_ignore_order_get_old(
obj, remove_indexes_per_path, fixed_indexes_values, path_for_err_reporting=path)
while there_are_old_items or fixed_indexes_per_path:
new_obj_index = len(new_obj)
if new_obj_index in fixed_indexes_per_path:
new_item = fixed_indexes_per_path.pop(new_obj_index)
new_obj.append(new_item)
elif there_are_old_items:
try:
new_item = next(old_item_gen)
except StopIteration:
there_are_old_items = False
else:
new_obj.append(new_item)
else:
# pop an arbitrary remaining item from the fixed_indexes_per_path dictionary
self._raise_or_log(INDEXES_NOT_FOUND_WHEN_IGNORE_ORDER.format(fixed_indexes_per_path))
new_item = fixed_indexes_per_path.pop(next(iter(fixed_indexes_per_path)))
new_obj.append(new_item)
if isinstance(obj, tuple):
new_obj = tuple(new_obj)
# Making sure that the object is re-instated inside the parent especially if it was immutable
# and we had to turn it into a mutable one. In such cases the object has a new id.
self._simple_set_elem_value(obj=parent, path_for_err_reporting=path, elem=parent_to_obj_elem,
value=new_obj, action=parent_to_obj_action)
def dump(self, file):
"""
Dump into file object
"""
# Small optimization: Our internal pickle serializer can just take a file object
# and directly write to it. However if a user defined serializer is passed
# we want to make it compatible with the expectation that self.serializer(self.diff)
# will give the user the serialization and then it can be written to
# a file object when using the dump(file) function.
param_names_of_serializer = set(self.serializer.__code__.co_varnames)
if 'file_obj' in param_names_of_serializer:
self.serializer(self.diff, file_obj=file)
else:
file.write(self.dumps())
def dumps(self):
"""
Return the serialized representation of the object as a bytes object, instead of writing it to a file.
"""
return self.serializer(self.diff)
def to_dict(self):
return dict(self.diff)
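# --- Illustrative usage sketch (not part of the original module) -------------
# The objects below are made-up examples; applying a Delta to the original
# object reproduces the changed one, which is the core contract of this class:
#
#     >>> from deepdiff import DeepDiff, Delta
#     >>> t1 = {'a': 1, 'b': [1, 2]}
#     >>> t2 = {'a': 2, 'b': [1, 2, 3]}
#     >>> delta = Delta(DeepDiff(t1, t2))
#     >>> t1 + delta == t2
#     True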
if __name__ == "__main__": # pragma: no cover
import doctest
doctest.testmod()
| avg_line_length: 46.477113 | max_line_length: 163 | alphanum_fraction: 0.637751 |

| hexsha: b9c184207839866636d560f91d98548195dece76 | size: 259 | ext: py | lang: Python |
| max_stars_repo: demo_stock/A_1day/D02_merge_total_shares.py @ jiangtiantu/kquant_data (head 9bd47ba23c23110757186897e37ea36234bdce2c) | licenses: ["BSD-2-Clause"] | stars: 23 (2017-08-05T04:35:47.000Z to 2020-12-16T09:40:08.000Z) |
| max_issues_repo: demo_stock/A_1day/D02_merge_total_shares.py @ jiangtiantu/kquant_data (head 9bd47ba23c23110757186897e37ea36234bdce2c) | licenses: ["BSD-2-Clause"] | issues: 2 (2017-08-05T04:57:10.000Z to 2018-04-14T14:52:39.000Z) |
| max_forks_repo: demo_stock/A_1day/D02_merge_total_shares.py @ wukan1986/kquant_data (head 9bd47ba23c23110757186897e37ea36234bdce2c) | licenses: ["BSD-2-Clause"] | forks: 21 (2017-08-01T09:56:30.000Z to 2021-07-10T01:19:39.000Z) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Merge financial data
"""
from kquant_data.processing.merge import merge_report
if __name__ == '__main__':
rule = '1day'
field = 'total_shares'
merge_report(rule, field, field)
print("Done")
debug = 1
| avg_line_length: 17.266667 | max_line_length: 53 | alphanum_fraction: 0.629344 |

| hexsha: 00d0255e06c68688d9db33323d750f7605745852 | size: 13,806 | ext: py | lang: Python |
| max_stars_repo: colour/appearance/tests/test_kim2009.py @ aurelienpierre/colour (head 3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47) | licenses: ["BSD-3-Clause"] | stars: null |
| max_issues_repo: colour/appearance/tests/test_kim2009.py @ aurelienpierre/colour (head 3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47) | licenses: ["BSD-3-Clause"] | issues: null |
| max_forks_repo: colour/appearance/tests/test_kim2009.py @ aurelienpierre/colour (head 3ac45c12fbc0493e49ba4d4b2cb253df9fe14c47) | licenses: ["BSD-3-Clause"] | forks: null |
# !/usr/bin/env python
"""Defines the unit tests for the :mod:`colour.appearance.kim2009` module."""
import numpy as np
import unittest
from itertools import permutations
from colour.appearance import (
MEDIA_PARAMETERS_KIM2009,
VIEWING_CONDITIONS_KIM2009,
InductionFactors_Kim2009,
CAM_Specification_Kim2009,
MediaParameters_Kim2009,
XYZ_to_Kim2009,
Kim2009_to_XYZ,
)
from colour.utilities import (
as_float_array,
domain_range_scale,
ignore_numpy_errors,
tsplit,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestXYZ_to_Kim2009",
"TestKim2009_to_XYZ",
]
class TestXYZ_to_Kim2009(unittest.TestCase):
"""
Define :func:`colour.appearance.kim2009.XYZ_to_Kim2009` definition unit
tests methods.
"""
def test_XYZ_to_Kim2009(self):
"""Test :func:`colour.appearance.kim2009.XYZ_to_Kim2009` definition."""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_a = 318.31
media = MEDIA_PARAMETERS_KIM2009["CRT Displays"]
surround = VIEWING_CONDITIONS_KIM2009["Average"]
np.testing.assert_almost_equal(
XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround),
np.array(
[
28.86190898,
0.55924559,
219.04806678,
9.38377973,
52.71388839,
0.46417384,
278.06028246,
np.nan,
]
),
decimal=7,
)
XYZ = np.array([57.06, 43.06, 31.96])
L_a = 31.83
np.testing.assert_almost_equal(
XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround),
np.array(
[
70.15940419,
57.89295872,
21.27017200,
61.23630434,
128.14034598,
48.05115573,
1.41841443,
np.nan,
]
),
decimal=7,
)
XYZ = np.array([3.53, 6.56, 2.14])
XYZ_w = np.array([109.85, 100.00, 35.58])
L_a = 318.31
np.testing.assert_almost_equal(
XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround),
np.array(
[
-4.83430022,
37.42013921,
177.12166057,
np.nan,
-8.82944930,
31.05871555,
220.36270343,
np.nan,
]
),
decimal=7,
)
XYZ = np.array([19.01, 20.00, 21.78])
L_a = 31.83
np.testing.assert_almost_equal(
XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround),
np.array(
[
47.20460719,
56.35723637,
241.04877377,
73.65830083,
86.21530880,
46.77650619,
301.77516676,
np.nan,
]
),
decimal=7,
)
def test_n_dimensional_XYZ_to_Kim2009(self):
"""
Test :func:`colour.appearance.kim2009.XYZ_to_Kim2009` definition
n-dimensional support.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_a = 318.31
media = MEDIA_PARAMETERS_KIM2009["CRT Displays"]
surround = VIEWING_CONDITIONS_KIM2009["Average"]
specification = XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround)
XYZ = np.tile(XYZ, (6, 1))
specification = np.tile(specification, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround),
specification,
decimal=7,
)
XYZ_w = np.tile(XYZ_w, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround),
specification,
decimal=7,
)
XYZ = np.reshape(XYZ, (2, 3, 3))
XYZ_w = np.reshape(XYZ_w, (2, 3, 3))
specification = np.reshape(specification, (2, 3, 8))
np.testing.assert_almost_equal(
XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround),
specification,
decimal=7,
)
@ignore_numpy_errors
def test_domain_range_scale_XYZ_to_Kim2009(self):
"""
Test :func:`colour.appearance.kim2009.XYZ_to_Kim2009` definition
domain and range scale support.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_a = 318.31
media = MEDIA_PARAMETERS_KIM2009["CRT Displays"]
surround = VIEWING_CONDITIONS_KIM2009["Average"]
specification = XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround)
d_r = (
("reference", 1, 1),
(
"1",
0.01,
np.array(
[
1 / 100,
1 / 100,
1 / 360,
1 / 100,
1 / 100,
1 / 100,
1 / 400,
np.nan,
]
),
),
(
"100",
1,
np.array([1, 1, 100 / 360, 1, 1, 1, 100 / 400, np.nan]),
),
)
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_Kim2009(
XYZ * factor_a, XYZ_w * factor_a, L_a, media, surround
),
as_float_array(specification) * factor_b,
decimal=7,
)
@ignore_numpy_errors
def test_nan_XYZ_to_Kim2009(self):
"""
Test :func:`colour.appearance.kim2009.XYZ_to_Kim2009` definition
nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
XYZ_w = np.array(case)
L_a = case[0]
media = MediaParameters_Kim2009(case[0])
surround = InductionFactors_Kim2009(case[0], case[0], case[0])
XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround)
class TestKim2009_to_XYZ(unittest.TestCase):
"""
Define :func:`colour.appearance.kim2009.Kim2009_to_XYZ` definition unit
tests methods.
"""
def test_Kim2009_to_XYZ(self):
"""Test :func:`colour.appearance.kim2009.Kim2009_to_XYZ` definition."""
specification = CAM_Specification_Kim2009(
28.86190898,
0.55924559,
219.04806678,
9.38377973,
52.71388839,
0.46417384,
278.06028246,
np.nan,
)
XYZ_w = np.array([95.05, 100.00, 108.88])
L_a = 318.31
media = MEDIA_PARAMETERS_KIM2009["CRT Displays"]
surround = VIEWING_CONDITIONS_KIM2009["Average"]
np.testing.assert_allclose(
Kim2009_to_XYZ(specification, XYZ_w, L_a, media, surround),
np.array([19.01, 20.00, 21.78]),
atol=0.01,
rtol=0.01,
)
specification = CAM_Specification_Kim2009(
70.15940419,
57.89295872,
21.27017200,
61.23630434,
128.14034598,
48.05115573,
1.41841443,
np.nan,
)
L_a = 31.83
np.testing.assert_allclose(
Kim2009_to_XYZ(specification, XYZ_w, L_a, media, surround),
np.array([57.06, 43.06, 31.96]),
atol=0.01,
rtol=0.01,
)
specification = CAM_Specification_Kim2009(
-4.83430022,
37.42013921,
177.12166057,
np.nan,
-8.82944930,
31.05871555,
220.36270343,
np.nan,
)
XYZ_w = np.array([109.85, 100.00, 35.58])
L_a = 318.31
np.testing.assert_allclose(
Kim2009_to_XYZ(specification, XYZ_w, L_a, media, surround),
np.array([3.53, 6.56, 2.14]),
atol=0.01,
rtol=0.01,
)
specification = CAM_Specification_Kim2009(
47.20460719,
56.35723637,
241.04877377,
73.65830083,
86.21530880,
46.77650619,
301.77516676,
np.nan,
)
L_a = 31.83
np.testing.assert_allclose(
Kim2009_to_XYZ(specification, XYZ_w, L_a, media, surround),
np.array([19.01, 20.00, 21.78]),
atol=0.01,
rtol=0.01,
)
def test_n_dimensional_Kim2009_to_XYZ(self):
"""
Test :func:`colour.appearance.kim2009.Kim2009_to_XYZ` definition
n-dimensional support.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_a = 318.31
media = MEDIA_PARAMETERS_KIM2009["CRT Displays"]
surround = VIEWING_CONDITIONS_KIM2009["Average"]
specification = XYZ_to_Kim2009(XYZ, XYZ_w, L_a, media, surround)
XYZ = Kim2009_to_XYZ(specification, XYZ_w, L_a, media, surround)
specification = CAM_Specification_Kim2009(
*np.transpose(np.tile(tsplit(specification), (6, 1))).tolist()
)
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(
Kim2009_to_XYZ(specification, XYZ_w, L_a, media, surround),
XYZ,
decimal=7,
)
XYZ_w = np.tile(XYZ_w, (6, 1))
np.testing.assert_almost_equal(
Kim2009_to_XYZ(specification, XYZ_w, L_a, media, surround),
XYZ,
decimal=7,
)
specification = CAM_Specification_Kim2009(
*tsplit(np.reshape(specification, (2, 3, 8))).tolist()
)
XYZ_w = np.reshape(XYZ_w, (2, 3, 3))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(
Kim2009_to_XYZ(specification, XYZ_w, L_a, media, surround),
XYZ,
decimal=7,
)
@ignore_numpy_errors
def test_domain_range_scale_Kim2009_to_XYZ(self):
"""
Test :func:`colour.appearance.kim2009.Kim2009_to_XYZ` definition
domain and range scale support.
"""
XYZ_i = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_a = 318.31
media = MEDIA_PARAMETERS_KIM2009["CRT Displays"]
surround = VIEWING_CONDITIONS_KIM2009["Average"]
specification = XYZ_to_Kim2009(XYZ_i, XYZ_w, L_a, media, surround)
XYZ = Kim2009_to_XYZ(specification, XYZ_w, L_a, media, surround)
d_r = (
("reference", 1, 1),
(
"1",
np.array(
[
1 / 100,
1 / 100,
1 / 360,
1 / 100,
1 / 100,
1 / 100,
1 / 400,
np.nan,
]
),
0.01,
),
(
"100",
np.array([1, 1, 100 / 360, 1, 1, 1, 100 / 400, np.nan]),
1,
),
)
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
Kim2009_to_XYZ(
specification * factor_a,
XYZ_w * factor_b,
L_a,
media,
surround,
),
XYZ * factor_b,
decimal=7,
)
@ignore_numpy_errors
def test_raise_exception_Kim2009_to_XYZ(self):
"""
Test :func:`colour.appearance.kim2009.Kim2009_to_XYZ` definition
raised exception.
"""
self.assertRaises(
ValueError,
Kim2009_to_XYZ,
CAM_Specification_Kim2009(
41.731091132513917,
None,
219.04843265831178,
),
np.array([95.05, 100.00, 108.88]),
318.31,
20.0,
VIEWING_CONDITIONS_KIM2009["Average"],
)
@ignore_numpy_errors
def test_nan_Kim2009_to_XYZ(self):
"""
Test :func:`colour.appearance.kim2009.Kim2009_to_XYZ` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
J = case[0]
C = case[0]
h = case[0]
XYZ_w = np.array(case)
L_a = case[0]
media = MediaParameters_Kim2009(case[0])
surround = InductionFactors_Kim2009(case[0], case[0], case[0])
Kim2009_to_XYZ(
CAM_Specification_Kim2009(J, C, h, M=50),
XYZ_w,
L_a,
media,
surround,
)
| avg_line_length: 30.409692 | max_line_length: 79 | alphanum_fraction: 0.488121 |

| hexsha: 8be51723ecb3eb9e718a4675e48dbe31ad8519be | size: 7,249 | ext: py | lang: Python |
| max_stars_repo: datamodels/doc/source/conf.py @ mwregan2/MiriTE (head 6b65939454db60bf10619d50fcb5769d23598b76) | licenses: ["CNRI-Python"] | stars: null |
| max_issues_repo: datamodels/doc/source/conf.py @ mwregan2/MiriTE (head 6b65939454db60bf10619d50fcb5769d23598b76) | licenses: ["CNRI-Python"] | issues: 24 (2019-08-09T15:03:20.000Z to 2022-03-04T10:04:48.000Z) |
| max_forks_repo: datamodels/doc/source/conf.py @ mwregan2/MiriTE (head 6b65939454db60bf10619d50fcb5769d23598b76) | licenses: ["CNRI-Python"] | forks: 4 (2019-06-16T15:03:23.000Z to 2020-12-02T19:51:52.000Z) |
# -*- coding: utf-8 -*-
#
# filter documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 1 21:59:45 2010.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
try:
import miri
import miri.datamodels
except ImportError:
strg = "***Please build the miri package with \'python setup.py install\'"
strg += " before building the documentation.\n"
raise ImportError(strg)
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [\
'sphinx.ext.autodoc',
'sphinx.ext.doctest'
#'sphinx.ext.intersphinx',
#'sphinx.ext.ifconfig',
#'numpydoc',
#'matplotlib.sphinxext.mathmpl',
#'matplotlib.sphinxext.only_directives',
#'matplotlib.sphinxext.plot_directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
#master_doc = 'reference/miri_datamodels'
master_doc = 'index'
# General information about the project.
project = miri.datamodels.__project__
copyright = miri.datamodels.__copyright__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = miri.__version__
# The full version, including alpha/beta/rc tags.
release = miri.__version__ + " (MiriTE package)"
#release = release.replace('GlobalRev:','')
#release = release.replace('$','')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'miri_datamodels'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('reference/miri_datamodels', 'miri_datamodels.tex', 'MIRI Data Models Documentation',
'MIRI Software Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| avg_line_length: 32.653153 | max_line_length: 88 | alphanum_fraction: 0.71265 |

| hexsha: 0168b42604d1d2d0ddc1b76fab177b82426af792 | size: 123,968 | ext: py | lang: Python |
| max_stars_repo: src/v96.py @ numb3r33/Kaggle_Home_Credit (head f8f56a0514b928d7ed4b8f38c6edc53b67bab32d) | licenses: ["MIT"] | stars: null |
| max_issues_repo: src/v96.py @ numb3r33/Kaggle_Home_Credit (head f8f56a0514b928d7ed4b8f38c6edc53b67bab32d) | licenses: ["MIT"] | issues: 14 (2020-01-28T22:02:01.000Z to 2022-03-11T23:33:08.000Z) |
| max_forks_repo: src/v96.py @ numb3r33/Kaggle_Home_Credit (head f8f56a0514b928d7ed4b8f38c6edc53b67bab32d) | licenses: ["MIT"] | forks: null |
import pandas as pd
import numpy as np
import scipy as sp
import argparse
import os
import gc
import time
from base import *
from features import *
from datetime import datetime
import joblib
from sklearn.model_selection import cross_val_score, StratifiedKFold
basepath = os.path.expanduser('../')
SEED = 1231
np.random.seed(SEED)
#############################################################################################################
# EXPERIMENT PARAMETERS #
#############################################################################################################
COLS_TO_REMOVE = ['SK_ID_CURR',
'TARGET',
'OCCUPATION_TYPE__5',
'OCCUPATION_TYPE__-1',
'OCCUPATION_TYPE__11',
'OCCUPATION_TYPE__15',
'ORGANIZATION_TYPE__29',
'ORGANIZATION_TYPE__5',
'FLAG_OWN_REALTY',
'FLAG_DOCUMENT_21',
'ORGANIZATION_TYPE__21',
'FLAG_DOCUMENT_14',
'ORGANIZATION_TYPE__17',
'ORGANIZATION_TYPE__27',
'ORGANIZATION_TYPE__32',
'FLAG_DOCUMENT_16',
'ORGANIZATION_TYPE__47',
'FLAG_DOCUMENT_13',
'FLAG_DOCUMENT_11',
'ORGANIZATION_TYPE__40',
'ORGANIZATION_TYPE__23',
'ORGANIZATION_TYPE__14',
'diff_max_min_credit_term',
'ORGANIZATION_TYPE__1',
'ORGANIZATION_TYPE__9',
'OCCUPATION_TYPE__nan',
'ORGANIZATION_TYPE__41',
'OCCUPATION_TYPE__7',
'FLAG_MOBIL',
'ORGANIZATION_TYPE__18',
'ORGANIZATION_TYPE__38',
'ORGANIZATION_TYPE__44',
'FLAG_DOCUMENT_12',
'ORGANIZATION_TYPE__0',
'FLAG_DOCUMENT_2',
'ORGANIZATION_TYPE__13',
'OCCUPATION_TYPE__0',
'FLAG_DOCUMENT_4',
'OCCUPATION_TYPE__16',
'ORGANIZATION_TYPE__49',
'FLAG_DOCUMENT_6',
'FLAG_DOCUMENT_9',
'ORGANIZATION_TYPE__nan',
'OCCUPATION_TYPE__12',
'ORGANIZATION_TYPE__20',
'FLAG_CONT_MOBILE',
'ORGANIZATION_TYPE__37',
'ORGANIZATION_TYPE__45',
'FLAG_EMP_PHONE',
'FLAG_DOCUMENT_17',
'LIVE_REGION_NOT_WORK_REGION',
'OCCUPATION_TYPE__17',
'NAME_TYPE_SUITE',
'ORGANIZATION_TYPE__15',
'REG_REGION_NOT_LIVE_REGION',
'FLAG_DOCUMENT_10',
'ORGANIZATION_TYPE__3',
'OCCUPATION_TYPE__2',
'ORGANIZATION_TYPE__19',
'FLAG_DOCUMENT_19',
'AMT_REQ_CREDIT_BUREAU_DAY',
'credits_ended_bureau',
'ORGANIZATION_TYPE__8',
'ORGANIZATION_TYPE__16',
'FLAG_DOCUMENT_8',
'ORGANIZATION_TYPE__25',
'OCCUPATION_TYPE__6',
'NUM_NULLS_EXT_SCORES',
'ORGANIZATION_TYPE__48',
'ORGANIZATION_TYPE__53',
'ORGANIZATION_TYPE__10',
'FLAG_DOCUMENT_7',
'ORGANIZATION_TYPE__55',
'ORGANIZATION_TYPE__24',
'NAME_EDUCATION_TYPE__0',
'ORGANIZATION_TYPE__46',
'ELEVATORS_MODE',
'NAME_EDUCATION_TYPE__nan',
'ORGANIZATION_TYPE__22',
'ORGANIZATION_TYPE__50',
'REG_REGION_NOT_WORK_REGION',
'ORGANIZATION_TYPE__56',
'FLAG_DOCUMENT_5',
'FLAG_DOCUMENT_20',
'ORGANIZATION_TYPE__2',
'ORGANIZATION_TYPE__6',
'OCCUPATION_TYPE__13',
'ORGANIZATION_TYPE__52',
'FLAG_DOCUMENT_15',
'ORGANIZATION_TYPE__43',
'AMT_REQ_CREDIT_BUREAU_HOUR',
'NAME_HOUSING_TYPE',
'ORGANIZATION_TYPE__11',
'HOUSETYPE_MODE',
'EMERGENCYSTATE_MODE',
'ORGANIZATION_TYPE__28',
'NAME_EDUCATION_TYPE__2',
'ORGANIZATION_TYPE__4',
'OCCUPATION_TYPE__14',
'ORGANIZATION_TYPE__35',
'LIVE_CITY_NOT_WORK_CITY',
'num_diff_credits',
'ORGANIZATION_TYPE__51',
'REG_CITY_NOT_WORK_CITY',
'FLAG_EMAIL',
'ORGANIZATION_TYPE__57',
'NAME_HOUSING_TYPE__0',
'NAME_INCOME_TYPE__2',
'NAME_INCOME_TYPE__5',
'NAME_HOUSING_TYPE__nan',
'NAME_INCOME_TYPE__nan',
'NAME_INCOME_TYPE__0',
'NAME_INCOME_TYPE__6',
'NAME_CONTRACT_STATUS_3',
'NAME_INCOME_TYPE__3',
'diff_balance_curr_credit',
'ratio_min_installment_balance',
'NAME_HOUSING_TYPE__4',
'CODE_REJECT_REASON_5',
'CODE_REJECT_REASON_8',
'ORGANIZATION_TYPE__33',
'CODE_REJECT_REASON_0',
'OCCUPATION_TYPE__1',
'NAME_HOUSING_TYPE__5',
'sum_num_times_prolonged',
'NAME_GOODS_CATEGORY_13',
'NAME_GOODS_CATEGORY_4',
'NAME_GOODS_CATEGORY_26',
'PRODUCT_COMBINATION_-1',
'NAME_GOODS_CATEGORY_24',
'NAME_GOODS_CATEGORY_15',
'NAME_GOODS_CATEGORY_20',
'NAME_GOODS_CATEGORY_9',
'CODE_REJECT_REASON_6',
'NAME_GOODS_CATEGORY_6',
'NAME_GOODS_CATEGORY_0',
'num_high_int_no_info_loans',
'NAME_HOUSING_TYPE__2',
'NAME_GOODS_CATEGORY_14',
'NAME_GOODS_CATEGORY_17',
'PRODUCT_COMBINATION_16',
'PRODUCT_COMBINATION_15',
'OCCUPATION_TYPE__10',
'PRODUCT_COMBINATION_14',
'NAME_GOODS_CATEGORY_1',
'NAME_GOODS_CATEGORY_12',
'NAME_GOODS_CATEGORY_21',
'NAME_GOODS_CATEGORY_25',
'OCCUPATION_TYPE__9',
'NAME_GOODS_CATEGORY_10',
'NAME_GOODS_CATEGORY_16',
'NAME_GOODS_CATEGORY_8',
'mean_CODE_GENDER_ORGANIZATION_TYPE_DAYS_REGISTRATION',
'FLAG_DOCUMENT_18',
'NAME_GOODS_CATEGORY_18',
'ORGANIZATION_TYPE__30',
'sum_CODE_GENDER_NAME_EDUCATION_TYPE_OWN_CAR_AGE',
'ORGANIZATION_TYPE__12',
'NAME_EDUCATION_TYPE__3',
'ORGANIZATION_TYPE__36',
'ORGANIZATION_TYPE__34',
'total_bureau_prev_app_live_debt',
'total_bureau_prev_app_live_debt_to_income',
'total_live_debt_credit',
'external_scores_nan_median',
'months_left_to_pay',
'new_user_date',
'recent_employment',
'young_age',
'actual_proposed_termination',
'completed_to_total',
'mean_status',
'median_num_bureau_balance',
'EXT_3_2',
'EXT_1_3',
'EXT_1_2',
'EXT_1_3_gm',
'EXT_2_3_gm',
'EXT_1_2_3_gm',
'EXT_1_2_sum',
'EXT_1_3_sum',
'EXT_2_3_sum',
'EXT_1_2_div',
'EXT_1_3_div',
'EXT_2_3_div',
'EXT_1_2_mean',
'EXT_2_3_mean',
'EXT_1_3_mean',
'weighted_mean_external_scores',
'external_scores_nan_median',
'EXT_SOURCE_DEV',
'EXT_SOURCE_SUM',
'MEAN_EXTERNAL_SCORE',
'mean_EXT_SOURCE_2_num_nulls',
'ratio_annuity_score_1',
'ratio_annuity_score_2',
'ratio_annuity_score_3',
'ratio_credit_annuity_score_1',
'ratio_credit_annuity_score_2',
'ratio_credit_annuity_score_3',
'annuity_div_income_ext_source_2',
'annuity_sub_income_ext_source_2',
'annuity_div_credit_ext_source_2',
'annuity_sub_credit_ext_source_2',
'mult_age_ext_source_1',
'mult_age_ext_source_2',
'mult_age_ext_source_3',
'div_age_ext_source_1',
'div_age_ext_source_2',
'div_age_ext_source_3',
'debt_to_credit_ext_source_2_mult',
'max_debt_to_credit_ext_source_2_mult',
'diff_code_gender_organization_type_source_2_mean',
'diff_code_gender_occupation_source_3_mean',
'diff_family_income_ext_source_3_mean',
'diff_education_occupation_source_3_mean',
'diff_income_type_education_type_source_2_mean',
'diff_family_income_ext_source_2_mean',
'diff_organization_ext_source_3_mean',
'diff_occupation_source_3_mean',
'diff_name_income_type_occupation_source_2_mean',
'diff_organization_ext_source_2_mean',
'mean_CODE_GENDER_OCCUPATION_TYPE_EXT_SOURCE_2',
'mean_CODE_GENDER_NAME_EDUCATION_TYPE_EXT_SOURCE_2',
'diff_organization_ext_source_1_mean',
'diff_income_ext_source_1_mean',
'var_CODE_GENDER_NAME_EDUCATION_TYPE_EXT_SOURCE_2',
'mean_CODE_GENDER_OCCUPATION_TYPE_EXT_SOURCE_1',
'mean_CODE_GENDER_NAME_EDUCATION_TYPE_EXT_SOURCE_1',
'diff_code_gender_organization_type_source_1_mean',
'ext_3_age',
'EXT_SOURCE_3',
'ext_2_age',
'diff_occupation_source_2_mean',
'diff_education_occupation_source_2_mean',
'ratio_annuity_credit_ext_source_3',
'add_mult_age_employed_ext_2',
'EXT_SOURCE_2',
'region_ext_source_3',
'mult_annuity_credit_ext_source_3',
'diff_code_gender_occupation_source_2_mean',
'diff_income_ext_source_3_mean',
'EXT_SOURCE_1',
'ratio_annuity_credit_ext_source_2',
'diff_family_education_ext_source_3_mean',
'mult_annuity_credit_ext_source_2',
'diff_family_income_ext_source_1_mean',
'diff_code_gender_name_education_type_source_2_mean',
'diff_education_ext_source_1_mean',
'add_rate_ext_2',
'diff_income_ext_source_2_mean',
'diff_family_education_ext_source_1_mean',
'diff_education_ext_source_2_mean',
'diff_education_ext_source_3_mean',
'diff_code_gender_occupation_source_1_mean',
'diff_code_gender_name_education_type_source_1_mean',
'mean_OCCUPATION_TYPE_ORGANIZATION_TYPE_EXT_SOURCE_2',
'mean_NAME_EDUCATION_TYPE_OCCUPATION_TYPE_EXT_SOURCE_2',
'mean_NAME_INCOME_TYPE_OCCUPATION_TYPE_EXT_SOURCE_2',
'mean_NAME_EDUCATION_TYPE_OCCUPATION_TYPE_EXT_SOURCE_1',
'mean_NAME_FAMILY_STATUS_NAME_EDUCATION_TYPE_EXT_SOURCE_2',
'rate_annuity_region_ext_source_2',
'mean_OCCUPATION_TYPE_ratio_annuity_credit',
'diff_family_education_ext_source_2_mean',
'ratio_annuity_credit_ext_source_1',
'mean_NAME_EDUCATION_TYPE_EXT_SOURCE_2',
'mean_NAME_EDUCATION_TYPE_OCCUPATION_TYPE_REG_CITY_NOT_WORK_CITY_EXT_SOURCE_2',
'ratio_annuity_credit_ext_source_3',
'add_mult_age_employed_ext_2',
'diff_education_occupation_source_1_mean',
'mult_annuity_credit_ext_source_1',
'mean_NAME_INCOME_TYPE_NAME_EDUCATION_TYPE_EXT_SOURCE_2'
]
PARAMS = {
'num_boost_round': 10000,
'early_stopping_rounds': 200,
'boosting_type': 'gbdt',
'objective': 'binary',
'learning_rate': .02,
'metric': 'auc',
'max_depth': 4,
'num_leaves': 58,
'sub_feature': 0.10,
'feature_fraction_seed': SEED,
'bagging_fraction': 0.89,
'bagging_seed': SEED,
'min_data_in_leaf': 57,
'max_bin': 300,
'lambda_l1': 0.05,
'lambda_l2': 51,
'min_split_gain': 0.05,
'min_child_weight': 77,
'nthread': 8,
'verbose': -1,
'seed': SEED
}
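# PARAMS mixes training-control settings (num_boost_round, early_stopping_rounds)
# with LightGBM booster hyper-parameters. A minimal sketch of the assumed usage,
# where dtrain/dval are hypothetical lgb.Dataset objects and the real wiring lives
# in BaseModel:
#
#   import lightgbm as lgb
#   booster_params = {k: v for k, v in PARAMS.items()
#                     if k not in ('num_boost_round', 'early_stopping_rounds')}
#   model = lgb.train(booster_params, dtrain,
#                     num_boost_round=PARAMS['num_boost_round'],
#                     valid_sets=[dval],
#                     early_stopping_rounds=PARAMS['early_stopping_rounds'])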
PCA_PARAMS = {
'n_components': 10,
'whiten': True,
'random_state': SEED
}
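# PCA_PARAMS mirrors the sklearn.decomposition.PCA constructor arguments. A sketch of
# the assumed usage elsewhere in the pipeline (X_filled is a hypothetical numeric
# matrix with no NaNs):
#
#   from sklearn.decomposition import PCA
#   pca = PCA(**PCA_PARAMS)                    # 10 whitened components, fixed seed
#   components = pca.fit_transform(X_filled)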
MODEL_FILENAME = 'v96'
SAMPLE_SIZE = .1
# NOTE: a column listed in the frequency-encoded columns below
# cannot also appear in the one-hot-encoded columns (see the short illustration after OHE_COLS).
FREQ_ENCODING_COLS = ['ORGANIZATION_OCCUPATION',
'age_emp_categorical',
'age_occupation'
]
OHE_COLS = [
'ORGANIZATION_TYPE',
'OCCUPATION_TYPE',
'NAME_EDUCATION_TYPE',
'NAME_HOUSING_TYPE',
'NAME_INCOME_TYPE'
]
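# Illustration of the NOTE above: frequency encoding replaces each category with how
# often it occurs, yielding a single numeric column, while one-hot encoding expands a
# column into one indicator per category (producing names such as ORGANIZATION_TYPE__29,
# which is why those show up in COLS_TO_REMOVE). Applying both to the same column would
# encode the same information twice, hence the two disjoint lists above.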
class Modelv96(BaseModel):
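# Experiment wrapper for run 'v96': each feature group is generated independently,
# cached to disk as train/test pickles, merged column-wise, and then, presumably,
# fed to a LightGBM model configured with the PARAMS defined above.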
def __init__(self, **params):
self.params = params
self.n_train = 307511 # TODO: find a way to remove this constant
def load_data(self, filenames):
dfs = []
for filename in filenames:
dfs.append(pd.read_csv(filename, parse_dates=True, keep_date_col=True))
df = pd.concat(dfs)
df.index = np.arange(len(df))
df = super(Modelv96, self).reduce_mem_usage(df)
return df
def reduce_mem_usage(self, df):
return super(Modelv96, self).reduce_mem_usage(df)
def preprocess(self):
tr = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'application_train.pkl'))
te = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'application_test.pkl'))
ntrain = len(tr)
data = pd.concat((tr, te))
del tr, te
gc.collect()
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_train.pkl')):
print('Generating features based on current application ....')
t0 = time.clock()
data, FEATURE_NAMES = current_application_features(data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on current application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
print('Generating features based on credits reported to bureau ....')
t0 = time.clock()
data, FEATURE_NAMES = bureau_features(bureau, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
del bureau
gc.collect()
else:
print('Already generated features based on bureau application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
bureau_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau_balance.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
for col in bureau_bal.select_dtypes(include=['category']).columns:
bureau_bal.loc[:, col] = bureau_bal.loc[:, col].cat.codes
print('Generating features based on credits reported to bureau and bureau balance ....')
t0 = time.clock()
data, FEATURE_NAMES = bureau_and_balance(bureau, bureau_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on bureau and balance')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
print('Generating features based on previous application ....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_features(prev_app, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on previous application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_train.pkl')):
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
print('Generating features based on pos cash ....')
t0 = time.clock()
data, FEATURE_NAMES = pos_cash_features(pos_cash, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del pos_cash
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on pos cash')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_train.pkl')):
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on Credit Card ....')
t0 = time.clock()
data, FEATURE_NAMES = credit_card_features(credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del credit_bal
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Credit Card')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_train.pkl')):
installments = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'installments_payments.pkl'))
for col in installments.select_dtypes(include=['category']).columns:
installments.loc[:, col] = installments.loc[:, col].cat.codes
print('Generating features based on Installments ....')
t0 = time.clock()
data, FEATURE_NAMES = get_installment_features(installments, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del installments
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Installments')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Bureau Applications....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_bureau(prev_app, bureau, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del bureau, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Previous application and Bureau Applications')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Credit card balance ....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_credit_card(prev_app, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del credit_bal, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Previous application and Credit card balance')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
installments = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'installments_payments.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in installments.select_dtypes(include=['category']).columns:
installments.loc[:, col] = installments.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Installment Payments ....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_installments(prev_app, installments, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del installments, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Previous application and Installment Payments.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on loan stacking ....')
t0 = time.clock()
data, FEATURE_NAMES = loan_stacking(bureau, prev_app, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
del bureau
gc.collect()
else:
print('Already generated features based on loan stacking.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_train.pkl')):
print('Generating features based on feature groups ....')
t0 = time.clock()
data, FEATURE_NAMES = feature_groups(data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on feature groups.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_train.pkl')):
print('Generating features based on previous application and pos cash ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
t0 = time.clock()
data, FEATURE_NAMES = prev_app_pos(prev_app, pos_cash, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on previous application and pos cash.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_train.pkl')):
print('Generating features based on previous application, pos cash and credit card balance ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
t0 = time.time()
data, FEATURE_NAMES = prev_app_pos_credit(prev_app, pos_cash, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_test.pkl'))
print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on previous application, pos cash and credit card balance.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_train.pkl')):
print('Generating features based on previous application one hot encoded features ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
t0 = time.time()
data, FEATURE_NAMES = prev_app_ohe(prev_app, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_test.pkl'))
print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on previous application one hot encode features.')
def prepare_features(self):
tr = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'application_train.pkl'))
te = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'application_test.pkl'))
ntrain = len(tr)
data = pd.concat((tr, te))
del tr, te
gc.collect()
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_train.pkl')):
print('Generating features based on current application ....')
t0 = time.clock()
data, FEATURE_NAMES = current_application_features(data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'current_application_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on current application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
print('Generating features based on credits reported to bureau ....')
t0 = time.clock()
data, FEATURE_NAMES = bureau_features(bureau, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
del bureau
gc.collect()
else:
print('Already generated features based on bureau application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
bureau_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau_balance.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
for col in bureau_bal.select_dtypes(include=['category']).columns:
bureau_bal.loc[:, col] = bureau_bal.loc[:, col].cat.codes
print('Generating features based on credits reported to bureau and bureau balance ....')
t0 = time.clock()
data, FEATURE_NAMES = bureau_and_balance(bureau, bureau_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'bureau_bal_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on bureau and balance')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
print('Generating features based on previous application ....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_features(prev_app, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on previous application')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_train.pkl')):
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
print('Generating features based on pos cash ....')
t0 = time.clock()
data, FEATURE_NAMES = pos_cash_features(pos_cash, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del pos_cash
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'pos_cash_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on pos cash')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_train.pkl')):
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on Credit Card ....')
t0 = time.clock()
data, FEATURE_NAMES = credit_card_features(credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del credit_bal
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'credit_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Credit Card')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_train.pkl')):
installments = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'installments_payments.pkl'))
for col in installments.select_dtypes(include=['category']).columns:
installments.loc[:, col] = installments.loc[:, col].cat.codes
print('Generating features based on Installments ....')
t0 = time.clock()
data, FEATURE_NAMES = get_installment_features(installments, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del installments
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'installments_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Installments')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Bureau Applications....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_bureau(prev_app, bureau, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del bureau, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_bureau_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Previous application and Bureau Applications')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Credit card balance ....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_credit_card(prev_app, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del credit_bal, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_credit_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Previous application and Credit card balance')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_train.pkl')):
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
installments = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'installments_payments.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in installments.select_dtypes(include=['category']).columns:
installments.loc[:, col] = installments.loc[:, col].cat.codes
print('Generating features based on Previous Applications and Installment Payments ....')
t0 = time.clock()
data, FEATURE_NAMES = prev_app_installments(prev_app, installments, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
del installments, prev_app
gc.collect()
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_installments_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on Previous application and Installment Payments.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_train.pkl')):
bureau = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'bureau.pkl'))
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in bureau.select_dtypes(include=['category']).columns:
bureau.loc[:, col] = bureau.loc[:, col].cat.codes
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
print('Generating features based on loan stacking ....')
t0 = time.clock()
data, FEATURE_NAMES = loan_stacking(bureau, prev_app, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'loan_stacking_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
del bureau
gc.collect()
else:
print('Already generated features based on loan stacking.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_train.pkl')):
print('Generating features based on feature groups ....')
t0 = time.clock()
data, FEATURE_NAMES = feature_groups(data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'feature_groups_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on feature groups.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_train.pkl')):
print('Generating features based on previous application and pos cash ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
t0 = time.clock()
data, FEATURE_NAMES = prev_app_pos(prev_app, pos_cash, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_test.pkl'))
print('\nTook: {} seconds'.format(time.clock() - t0))
else:
print('Already generated features based on previous application and pos cash.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_train.pkl')):
print('Generating features based on previous application, pos cash and credit card balance ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
pos_cash = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'POS_CASH_balance.pkl'))
credit_bal = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'credit_card_balance.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
for col in pos_cash.select_dtypes(include=['category']).columns:
pos_cash.loc[:, col] = pos_cash.loc[:, col].cat.codes
for col in credit_bal.select_dtypes(include=['category']).columns:
credit_bal.loc[:, col] = credit_bal.loc[:, col].cat.codes
t0 = time.time()
data, FEATURE_NAMES = prev_app_pos_credit(prev_app, pos_cash, credit_bal, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_pos_cash_credit_bal_test.pkl'))
print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on previous application, pos cash and credit card balance.')
if not os.path.exists(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_train.pkl')):
print('Generating features based on previous application one hot encoded features ....')
prev_app = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + 'previous_application.pkl'))
for col in prev_app.select_dtypes(include=['category']).columns:
prev_app.loc[:, col] = prev_app.loc[:, col].cat.codes
t0 = time.time()
data, FEATURE_NAMES = prev_app_ohe(prev_app, data)
data.index = np.arange(len(data))
# fill infrequent values
data = super(Modelv96, self).fill_infrequent_values(data)
data.iloc[:ntrain].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_train.pkl'))
data.iloc[ntrain:].loc[:, FEATURE_NAMES].to_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'prev_app_ohe_test.pkl'))
print('\nTook: {} seconds'.format(time.time() - t0))
else:
print('Already generated features based on previous application one hot encode features.')
# This method loads the engineered feature groups from disk and concatenates them
# column-wise, returning one train dataframe and one test dataframe that can be
# consumed by the other layers of the pipeline.
def merge_datasets(self):
def get_filenames():
filenames = [f'application_',
f'current_application_',
f'bureau_',
f'bureau_bal_',
f'prev_app_',
f'pos_cash_',
f'credit_',
f'installments_',
f'prev_app_bureau_',
f'prev_app_credit_',
f'prev_app_installments_',
f'loan_stacking_',
f'feature_groups_',
f'prev_app_pos_cash_',
f'prev_app_pos_cash_credit_bal_',
f'prev_app_ohe_'
]
return filenames
train = []
test = []
filenames = get_filenames()
for filename_ in filenames:
tmp = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'{filename_}train.pkl'))
tmp.index = np.arange(len(tmp))
train.append(tmp)
for filename_ in filenames:
tmp = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + 'feature_groups/' + f'{filename_}test.pkl'))
tmp.index = np.arange(len(tmp))
test.append(tmp)
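# Each piece was re-indexed to a positional RangeIndex above, so the column-wise
# concat below aligns the feature groups row by row, separately for train and test.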
return pd.concat(train, axis=1), pd.concat(test, axis=1)
def feature_interaction(self, data, key, agg_feature, agg_func, agg_func_name):
key_name = '_'.join(key)
tmp = data.groupby(key)[agg_feature].apply(agg_func)\
.reset_index()\
.rename(columns={agg_feature: f'{agg_func_name}_{key_name}_{agg_feature}'})
feat_name = f'{agg_func_name}_{key_name}_{agg_feature}'
data.loc[:, feat_name] = data.loc[:, key].merge(tmp, on=key, how='left')[feat_name]
return data, feat_name
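# Usage sketch (arguments illustrative): feature_interaction(data,
# ['CODE_GENDER', 'OCCUPATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean') computes the
# group-wise mean of EXT_SOURCE_2 per (gender, occupation) pair, merges it back onto
# every row as 'mean_CODE_GENDER_OCCUPATION_TYPE_EXT_SOURCE_2', and returns the
# augmented dataframe together with that new column name.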
def feature_preprocessing(self, data):
# current application preprocessing
data['DAYS_LAST_PHONE_CHANGE'].replace(0, np.nan, inplace=True)
data['CODE_GENDER'].replace(2, np.nan, inplace=True)
data['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
# previous application
data['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace=True)
data['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace=True)
data['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace=True)
data['DAYS_LAST_DUE'].replace(365243, np.nan, inplace=True)
data['DAYS_TERMINATION'].replace(365243, np.nan, inplace=True)
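# 365243 days (~1000 years) is the placeholder value used in the Home Credit tables
# for unknown dates, so it is mapped to NaN rather than treated as a real duration.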
return data
def add_missing_values_flag(self, data):
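# For every column except the ID and target: map +/-inf to NaN, add a binary
# '<col>_flag' indicator when the column has any missing values, then impute with the
# column median (or -1 when the median itself is NaN) so the model still sees where
# imputation happened.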
# preprocess for pca
SKIP_COLS = ['SK_ID_CURR', 'TARGET']
for col in data.columns.drop(SKIP_COLS):
# replace inf with np.nan
data[col] = data[col].replace([np.inf, -np.inf], np.nan)
# fill missing values with median
if data[col].isnull().sum():
data[f'{col}_flag'] = data[col].isnull().astype(np.uint8)
if pd.isnull(data[col].median()):
data[col] = data[col].fillna(-1)
else:
data[col] = data[col].fillna(data[col].median())
return data
def get_features(self, train, test, compute_ohe):
data = pd.concat((train, test))
data.index = np.arange(len(data))
for col in data.select_dtypes(include=['category']).columns:
data[col] = data[col].cat.codes
# TODO: not very happy with the way we are computing interactions,
# because if any of the base features is omitted from the pipeline this code would
# still run, but the derived interaction would most likely end up as a column of all null values.
# flag for age >= 43
data.loc[:, 'age_ge_43'] = ((-data.DAYS_BIRTH / 365) > 43).astype(np.uint8)
# flag for age over 43 and income type Pensioner
data.loc[:, 'age_ge_43_and_income_pensioner'] = (((-data.DAYS_BIRTH / 365) > 43) & (data.NAME_INCOME_TYPE == 3)).astype(np.uint8)
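# NAME_INCOME_TYPE is integer-coded by this point; code 3 is assumed to correspond to
# the 'Pensioner' category (per the comment above), which depends on the category ordering.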
# concatenate OCCUPATION TYPE AND ORGANIZATION TYPE
data.loc[:, 'ORGANIZATION_OCCUPATION'] = pd.factorize(data.ORGANIZATION_TYPE.astype(str) +
data.OCCUPATION_TYPE.astype(str)
)[0]
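# pd.factorize returns (codes, uniques); taking [0] keeps only the integer code of each
# unique ORGANIZATION_TYPE / OCCUPATION_TYPE combination, i.e. a label-encoded cross feature.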
# interaction between total debt to income and (annuity / credit)
data.loc[:, 'debt_income_to_annuity_credit'] = data.total_debt_to_income / data.ratio_annuity_credit
# interaction between days birth and ratio of annuity to credit
data.loc[:, 'add_days_birth_annuity_credit'] = data.DAYS_BIRTH + data.ratio_annuity_credit
# interaction between ratio of annuity to credit with external source 2 score
data.loc[:, 'mult_annuity_credit_ext_source_2'] = data.ratio_annuity_credit * data.EXT_SOURCE_2
data.loc[:, 'ratio_annuity_credit_ext_source_2'] = data.ratio_annuity_credit / data.EXT_SOURCE_2.map(np.log1p)
data.loc[:, 'mult_annuity_credit_ext_source_1'] = data.ratio_annuity_credit * data.EXT_SOURCE_1
data.loc[:, 'ratio_annuity_credit_ext_source_1'] = data.ratio_annuity_credit / data.EXT_SOURCE_1.map(np.log1p)
data.loc[:, 'mult_annuity_credit_ext_source_3'] = data.ratio_annuity_credit * data.EXT_SOURCE_3
data.loc[:, 'ratio_annuity_credit_ext_source_3'] = data.ratio_annuity_credit / data.EXT_SOURCE_3.map(np.log1p)
# interaction between ratio of annuity to credit with total amount paid in installments
data.loc[:, 'mult_annuity_credit_amt_payment_sum'] = data.ratio_annuity_credit * data.AMT_PAYMENT_sum
# interaction between total amount paid in installments and delay in installments
data.loc[:, 'mult_amt_payment_sum_delay_installment'] = data.AMT_PAYMENT_sum * data.delay_in_installment_payments
# interaction between credit / annuity and age
data.loc[:, 'diff_credit_annuity_age'] = (data.AMT_CREDIT / data.AMT_ANNUITY) - (-data.DAYS_BIRTH / 365)
# interaction between ext_3 and age
data.loc[:, 'ext_3_age'] = data.EXT_SOURCE_3 * (-data.DAYS_BIRTH / 365)
# interaction between ext_2 and age
data.loc[:, 'ext_2_age'] = data.EXT_SOURCE_2 * (-data.DAYS_BIRTH / 365)
# interaction between rate and external source 2
data.loc[:, 'add_rate_ext_2'] = (data.AMT_CREDIT / data.AMT_ANNUITY) + data.EXT_SOURCE_2
# interaction between rate and age
data.loc[:, 'add_rate_age'] = (data.AMT_CREDIT / data.AMT_ANNUITY) + (-data.DAYS_BIRTH / 365)
# interaction between age and employed and external score 2
data.loc[:, 'add_mult_age_employed_ext_2'] = ((-data.DAYS_BIRTH / 365) +\
(-data.DAYS_EMPLOYED.replace({365243: np.nan}))) *\
(data.EXT_SOURCE_2)
# combine ratio annuity credit, region populative relative and ext source 2
data.loc[:, 'rate_annuity_region_ext_source_2'] = data.ratio_annuity_credit * data.REGION_POPULATION_RELATIVE * data.EXT_SOURCE_2
data.loc[:, 'region_ext_source_3'] = data.REGION_POPULATION_RELATIVE * data.EXT_SOURCE_3
# Relationship between AMT_REQ_CREDIT_BUREAU_HOUR and AMT_REQ_CREDIT_BUREAU_YEAR
data.loc[:, 'ratio_check_hour_to_year'] = data.AMT_REQ_CREDIT_BUREAU_HOUR.div(data.AMT_REQ_CREDIT_BUREAU_YEAR)
# Relationship between Income and ratio annuity credit
data.loc[:, 'mult_ratio_income'] = (data.ratio_annuity_credit * data.AMT_INCOME_TOTAL).map(np.log1p)
data.loc[:, 'div_ratio_income'] = (data.AMT_INCOME_TOTAL / data.ratio_annuity_credit).map(np.log1p)
# Gender, Education and other features
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_code_gender_name_education_type_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_2', np.var, 'var')
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_code_gender_name_education_type_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_code_gender_name_education_type_amt_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_code_gender_name_education_type_amt_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'OWN_CAR_AGE', np.max, 'max')
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'OWN_CAR_AGE', np.sum, 'sum')
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_code_gender_education_type_age'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_code_gender_education_type_empl'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'NAME_EDUCATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_code_gender_education_type_income'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Gender, Occupation and other features
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'OCCUPATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_code_gender_occupation_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'OCCUPATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_code_gender_occupation_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'OCCUPATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_code_gender_occupation_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'OCCUPATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_code_gender_occupation_days_birth_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'OCCUPATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_code_gender_occupation_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'OCCUPATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_code_gender_occupation_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'OCCUPATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_code_gender_occupation_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'OCCUPATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_code_gender_occupation_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
# Gender, Organization and other features
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_code_gender_organization_type_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_code_gender_organization_type_amt_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_code_gender_organization_type_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'DAYS_REGISTRATION', np.mean, 'mean')
data.loc[:, 'diff_code_gender_organization_type_days_reg_mean'] = data[feat_name] - data['DAYS_REGISTRATION']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_code_gender_organization_type_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_code_gender_organization_type_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_code_gender_organization_type_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'ORGANIZATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_code_gender_organization_type_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
# Gender, Reg city not work city and other features
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_code_gender_reg_city_amount_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], 'CNT_CHILDREN', np.mean, 'mean')
data.loc[:, 'diff_code_gender_reg_city_cnt_children_mean'] = data[feat_name] - data['CNT_CHILDREN']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], 'DAYS_ID_PUBLISH', np.mean, 'mean')
data.loc[:, 'diff_code_gender_reg_city_days_id_mean'] = data[feat_name] - data['DAYS_ID_PUBLISH']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_code_gender_reg_city_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_code_gender_reg_city_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_code_gender_reg_city_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_code_gender_reg_city_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Income, Occupation and Ext Score
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'OCCUPATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_name_income_type_occupation_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'OCCUPATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_name_income_type_occupation_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'OCCUPATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_name_income_type_occupation_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'OCCUPATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_name_income_type_occupation_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'OCCUPATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_name_income_type_occupation_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'OCCUPATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_name_income_type_occupation_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'OCCUPATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_name_income_type_occupation_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'OCCUPATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_name_income_type_occupation_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Occupation and Organization and Ext Score
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_occupation_organization_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_occupation_organization_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_occupation_organization_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE', 'ORGANIZATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_occupation_organization_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE', 'ORGANIZATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_occupation_organization_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE', 'ORGANIZATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_occupation_organization_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE', 'ORGANIZATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_occupation_organization_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE', 'ORGANIZATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_occupation_organization_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Income, Education and Ext score
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_income_type_education_type_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_income_type_education_type_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_income_type_education_type_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_income_type_education_type_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_income_type_education_type_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_income_type_education_type_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_income_type_education_type_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE', 'NAME_EDUCATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_income_type_education_type_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Education and Occupation and other features
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_amt_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'OWN_CAR_AGE', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_car_age_mean'] = data[feat_name] - data['OWN_CAR_AGE']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Education, Occupation, Reg city not work city and other features
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_ext_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_ext_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_ext_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_reg_city_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_reg_city_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_reg_city_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_education_occupation_reg_city_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Occupation and other features
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_occupation_reg_city_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'CNT_CHILDREN', np.mean, 'mean')
data.loc[:, 'diff_occupation_cnt_children_mean'] = data[feat_name] - data['CNT_CHILDREN']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'CNT_FAM_MEMBERS', np.mean, 'mean')
data.loc[:, 'diff_occupation_cnt_fam_mebers_mean'] = data[feat_name] - data['CNT_FAM_MEMBERS']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_occupation_days_birth_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_occupation_days_employed_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_occupation_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_occupation_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_occupation_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'OWN_CAR_AGE', np.mean, 'mean')
data.loc[:, 'diff_occupation_own_car_age_mean'] = data[feat_name] - data['OWN_CAR_AGE']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'YEARS_BUILD_AVG', np.mean, 'mean')
data.loc[:, 'diff_occupation_year_build_mean'] = data[feat_name] - data['YEARS_BUILD_AVG']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'ratio_annuity_credit', np.mean, 'mean')
data.loc[:, 'diff_occupation_annuity_credit_mean'] = data[feat_name] - data['ratio_annuity_credit']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_occupation_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_occupation_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['OCCUPATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_occupation_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Organization type and other features
data, feat_name = self.feature_interaction(data, ['ORGANIZATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_organization_ext_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['ORGANIZATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_organization_ext_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['ORGANIZATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_organization_ext_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['ORGANIZATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_organization_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['ORGANIZATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_organization_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['ORGANIZATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_organization_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['ORGANIZATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_organization_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['ORGANIZATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_organization_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# INCOME Type and other features
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_income_ext_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data.loc[:, 'ratio_income_ext_source_1_mean'] = data[feat_name] / data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_income_ext_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_income_ext_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_income_ext_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_income_ext_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_income_ext_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_income_ext_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['NAME_INCOME_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_income_ext_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# EDUCATION Type and other features
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_education_ext_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_education_ext_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_education_ext_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_education_ext_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_education_ext_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_education_ext_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_education_ext_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['NAME_EDUCATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_education_ext_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Family Type and Income Type
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_INCOME_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_family_income_ext_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_INCOME_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_family_income_ext_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_INCOME_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_family_income_ext_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_INCOME_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_family_income_ext_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_INCOME_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_family_income_ext_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_INCOME_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_family_income_ext_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_INCOME_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_family_income_ext_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_INCOME_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_family_income_ext_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Family Type and Education Type
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_family_education_ext_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_family_education_ext_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_EDUCATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_family_education_ext_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_EDUCATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_family_education_ext_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_EDUCATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_family_education_ext_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_EDUCATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_family_education_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_EDUCATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_family_education_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'NAME_EDUCATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_family_education_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Family Type, Organization Type
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_family_organization_ext_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_family_organization_ext_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'ORGANIZATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_family_organization_ext_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'ORGANIZATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_family_organization_ext_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'ORGANIZATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_family_organization_ext_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'ORGANIZATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_family_organization_ext_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'ORGANIZATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_family_organization_ext_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'ORGANIZATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_family_organization_ext_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
# Family Type, Occupation Type
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'OCCUPATION_TYPE'], 'EXT_SOURCE_1', np.mean, 'mean')
data.loc[:, 'diff_family_occupation_ext_source_1_mean'] = data[feat_name] - data['EXT_SOURCE_1']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'OCCUPATION_TYPE'], 'EXT_SOURCE_2', np.mean, 'mean')
data.loc[:, 'diff_family_occupation_ext_source_2_mean'] = data[feat_name] - data['EXT_SOURCE_2']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'OCCUPATION_TYPE'], 'EXT_SOURCE_3', np.mean, 'mean')
data.loc[:, 'diff_family_occupation_ext_source_3_mean'] = data[feat_name] - data['EXT_SOURCE_3']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'OCCUPATION_TYPE'], 'DAYS_BIRTH', np.mean, 'mean')
data.loc[:, 'diff_family_occupation_ext_age_mean'] = data[feat_name] - data['DAYS_BIRTH']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'OCCUPATION_TYPE'], 'DAYS_EMPLOYED', np.mean, 'mean')
data.loc[:, 'diff_family_occupation_ext_empl_mean'] = data[feat_name] - data['DAYS_EMPLOYED']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'OCCUPATION_TYPE'], 'AMT_CREDIT', np.mean, 'mean')
data.loc[:, 'diff_family_occupation_ext_credit_mean'] = data[feat_name] - data['AMT_CREDIT']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'OCCUPATION_TYPE'], 'AMT_ANNUITY', np.mean, 'mean')
data.loc[:, 'diff_family_occupation_ext_annuity_mean'] = data[feat_name] - data['AMT_ANNUITY']
data, feat_name = self.feature_interaction(data, ['NAME_FAMILY_STATUS', 'OCCUPATION_TYPE'], 'AMT_INCOME_TOTAL', np.mean, 'mean')
data.loc[:, 'diff_family_occupation_ext_income_mean'] = data[feat_name] - data['AMT_INCOME_TOTAL']
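# ---------------------------------------------------------------------
# Illustrative sketch (assumption): `self.feature_interaction` is defined
# elsewhere in the project and is not shown in this section. Judging by its
# usage above, a helper of this shape would broadcast a group-wise statistic
# of `value_col` over `group_cols` onto every row and return the frame plus
# the new column name; the "diff_*" features above then subtract the raw
# value from that group statistic. All names here are hypothetical, not the
# project's actual helper.
import numpy as np
import pandas as pd

def feature_interaction_sketch(df, group_cols, value_col, agg_func, agg_name):
    new_col = '_'.join(group_cols + [value_col, agg_name]).lower()
    # transform() broadcasts the aggregated value back to every row of its group
    df[new_col] = df.groupby(group_cols)[value_col].transform(agg_func)
    return df, new_col

# usage mirroring the pattern above (toy frame, hypothetical column names)
_demo = pd.DataFrame({'CODE_GENDER': ['M', 'F', 'M'],
                      'AMT_CREDIT': [100.0, 250.0, 300.0]})
_demo, _name = feature_interaction_sketch(_demo, ['CODE_GENDER'], 'AMT_CREDIT', np.mean, 'mean')
_demo.loc[:, 'diff_gender_credit_mean'] = _demo[_name] - _demo['AMT_CREDIT']
# ---------------------------------------------------------------------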
# frequency encoding of some of the categorical variables.
data = frequency_encoding(data, FREQ_ENCODING_COLS)
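# Illustrative sketch (assumption): `frequency_encoding` and FREQ_ENCODING_COLS
# are defined elsewhere in this file. A typical frequency encoder simply maps
# each category to how often it occurs, which is the behaviour this step relies
# on; the helper below is a hypothetical stand-in, not the project's function.
import pandas as pd

def frequency_encoding_sketch(df, cols):
    for col in cols:
        counts = df[col].value_counts()
        df[col + '_freq'] = df[col].map(counts)
    return df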
# add pca components
if os.path.exists(os.path.join(basepath, self.params['output_path'] + f'{self.params["data_folder"]}pca.pkl')):
pca_components = pd.read_pickle(os.path.join(basepath, self.params['output_path'] + f'{self.params["data_folder"]}pca.pkl'))
else:
pca_components = super(Modelv96, self).add_pca_components(data.copy(), PCA_PARAMS)
pca_components.to_pickle(os.path.join(basepath, self.params['output_path'] + f'{self.params["data_folder"]}pca.pkl'))
pca_components.index = data.index
data = pd.concat((data, pca_components), axis=1)
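# Illustrative sketch (assumption): `add_pca_components` lives in the base class
# and is not shown here; the cache-to-pickle pattern above suggests it fits a PCA
# on the (imputed, scaled) numeric columns and returns the components as a frame
# aligned with `data`. PCA_PARAMS is assumed to carry at least n_components.
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

def add_pca_components_sketch(df, n_components=5):
    numeric = df.select_dtypes(include='number').fillna(0)
    scaled = StandardScaler().fit_transform(numeric)
    components = PCA(n_components=n_components).fit_transform(scaled)
    columns = ['pca_{}'.format(i) for i in range(components.shape[1])]
    return pd.DataFrame(components, columns=columns, index=df.index)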
# one hot encoding of some of the categorical variables controlled by a flag
# if flag is True then one hot encoding else do frequency encoding.
if compute_ohe:
data = super(Modelv96, self).prepare_ohe(data, OHE_COLS, drop_col=True)
else:
data = frequency_encoding(data, OHE_COLS)
return data
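# Illustrative sketch: the `compute_ohe` flag above switches between one-hot and
# frequency encoding for OHE_COLS. `prepare_ohe` is implemented in the base class
# and not shown here; a minimal pandas stand-in (hypothetical) would be:
import pandas as pd

def encode_categoricals_sketch(df, cols, compute_ohe=True):
    if compute_ohe:
        # one-hot encode and drop the source columns, as prepare_ohe(drop_col=True) implies
        return pd.get_dummies(df, columns=cols, dummy_na=True)
    for col in cols:
        df[col + '_freq'] = df[col].map(df[col].value_counts())
    return df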
# This method performs feature engineering on the merged datasets.
def fe(self, train, test, compute_ohe=True):
original_train = train.copy()
data = self.get_features(original_train, test, compute_ohe)
train = data.iloc[:len(train)]
test = data.iloc[len(train):]
del data, original_train
gc.collect()
return train, test
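# Illustrative sketch of the pattern used by `fe` above: concatenate train and
# test, engineer features on the combined frame so encodings and group statistics
# are consistent across both, then split back by position (the test rows simply
# carry no usable TARGET). `transform` is a placeholder for any feature step.
import pandas as pd

def fe_pattern_sketch(train, test, transform):
    data = pd.concat((train, test))
    data = transform(data)
    return data.iloc[:len(train)], data.iloc[len(train):]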
# This method just calls the base class with X, y, Xte and yte in the right format
# to train, and returns a trained model that can be dumped to disk for further use.
# TODO: Find out why we are not able to load the model back from disk and generate
# correct predictions; there seems to be some issue with it right now.
def train(self, train, test, feature_list, is_eval, TARGET_NAME='TARGET', **params):
X = train.loc[:, feature_list]
y = train.loc[:, TARGET_NAME]
Xte = test.loc[:, feature_list]
yte = []
if is_eval:
yte = test.loc[:, TARGET_NAME]
return super(Modelv96, self).train_lgb(X, y, Xte, yte, **params)
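# Illustrative sketch (assumption): `train_lgb` belongs to the base class and is
# not shown in this file. A minimal LightGBM routine of that shape would build
# Datasets, train with the passed params, and evaluate on the hold-out when
# labels are available; the real helper also returns a feature-importance frame
# (saved later as *_feat_imp.csv). Names below are hypothetical.
import lightgbm as lgb

def train_lgb_sketch(X, y, Xte, yte, **params):
    num_boost_round = params.pop('num_boost_round', 100)
    dtrain = lgb.Dataset(X, label=y)
    valid_sets = [lgb.Dataset(Xte, label=yte)] if len(yte) else []
    return lgb.train(params, dtrain, num_boost_round=num_boost_round,
                     valid_sets=valid_sets)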
# This method takes a model and a test dataset and returns predictions;
# it also prints the AUC on the test dataset when labels are available.
def evaluate(self, test, feature_list, is_eval, model, TARGET_NAME='TARGET'):
Xte = test.loc[:, feature_list]
yte = []
if is_eval:
yte = test.loc[:, TARGET_NAME]
return super(Modelv96, self).evaluate_lgb(Xte, yte, model)
def cross_validate(self, train, feature_list, params, cv_adversarial_filepath=None, categorical_feature='auto', TARGET_NAME='TARGET'):
Xtr = train.loc[:, feature_list]
ytr = train.loc[:, TARGET_NAME]
return super(Modelv96, self).cross_validate(Xtr,
ytr,
params,
cv_adversarial_filepath=cv_adversarial_filepath,
categorical_feature=categorical_feature
)
def oof_preds(self, train, test, feature_list, model, TARGET_NAME='TARGET'):
X = train.loc[:, feature_list]
y = train.loc[:, TARGET_NAME]
Xte = test.loc[:, feature_list]
return super(Modelv96, self).oof_preds(X, y, Xte, model)
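# Illustrative sketch (assumption): the base-class `oof_preds` is not shown here.
# The usual out-of-fold scheme it stands for: fit on K-1 folds, predict the held
# out fold for the train rows, and average the fold models' predictions for the
# test rows. An sklearn-style classifier (such as the LGBMClassifier built in the
# -oof branch below) is assumed.
import numpy as np
from sklearn.base import clone
from sklearn.model_selection import StratifiedKFold

def oof_preds_sketch(X, y, Xte, model, n_splits=5, seed=0):
    oof = np.zeros(len(X))
    test_preds = np.zeros(len(Xte))
    folds = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    for tr_idx, va_idx in folds.split(X, y):
        fold_model = clone(model).fit(X.iloc[tr_idx], y.iloc[tr_idx])
        oof[va_idx] = fold_model.predict_proba(X.iloc[va_idx])[:, 1]
        test_preds += fold_model.predict_proba(Xte)[:, 1] / n_splits
    return oof, test_preds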
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Home Credit Default Risk Solution')
parser.add_argument('-input_path', help='Path to input directory') # path to raw files
parser.add_argument('-output_path', help='Path to output directory') # path to working data folder
parser.add_argument('-data_folder', help='Folder name of the dataset') # dataset folder name
parser.add_argument('-p', type=bool, help='Preprocess')
parser.add_argument('-cv', type=bool, help='Cross Validation')
parser.add_argument('-v', type=str, help='Validation')
parser.add_argument('-features', type=bool, help='Generate Features')
parser.add_argument('-s', type=bool, help='Whether to work on a sample or not.')
parser.add_argument('-seed', type=int, help='Random SEED')
parser.add_argument('-cv_seed', type=int, help='CV SEED')
parser.add_argument('-oof', type=bool, help='OOF preds for training and test set.')
parser.add_argument('-t', type=bool, help='Full Training Loop.')
parser.add_argument('-ensemble', type=bool , help='Average out predictions.')
args = parser.parse_args()
if args.p:
print('Preprocessing ...')
input_path = args.input_path
output_path = args.output_path
params = {
'input_path': input_path,
'output_path': output_path
}
m = Modelv96(**params)
m.preprocess()
elif args.features:
print('Generating features ...')
print()
input_path = args.input_path
output_path = args.output_path
params = {
'input_path': input_path,
'output_path': output_path,
}
m = Modelv96(**params)
m.prepare_features()
elif args.v is not None and len(args.v):
print('Train and generate predictions on a fold')
input_path = args.input_path
output_path = args.output_path
data_folder = args.data_folder
fold_indicator = args.v
is_sample = args.s
cv_seed = args.cv_seed
SEED = int(args.seed)
print('*' * 100)
print('SEED FOUND: {}'.format(SEED))
params = {
'input_path': input_path,
'output_path': output_path,
'data_folder': data_folder
}
PARAMS = joblib.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{cv_seed}_params.pkl'))
# Set seeds in PARAMS
PARAMS['seed'] = SEED
PARAMS['feature_fraction_seed'] = SEED
PARAMS['bagging_seed'] = SEED
PARAMS['early_stopping_rounds'] = None # explicitly make it None
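# NOTE: the boosting-round count in PARAMS was fixed by the earlier CV run
# (num_boost_round = len(cv_history)), so early stopping is disabled here and
# the fold model is trained for the full number of rounds.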
print('*' * 100)
print('PARAMS: {}'.format(PARAMS))
m = Modelv96(**params)
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}data.h5')):
print('Loading dataset from disk ...')
data = pd.read_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
else:
print('Merge feature groups and save them to disk ...')
train, test = m.merge_datasets()
train, test = m.fe(train, test)
data = pd.concat((train, test))
data = m.reduce_mem_usage(data)
data.to_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
del train, test
gc.collect()
ite = pd.read_csv(os.path.join(basepath, input_path + 'cv_adversarial_idx_v1.csv'), usecols=[fold_indicator])[fold_indicator].values
print('Shape of fold indices ', len(ite))
itr = np.array(list(set(data.iloc[:m.n_train].index) - set(ite)))
# train = data.iloc[:m.n_train].iloc[itr]
# test = data.iloc[:m.n_train].iloc[ite]
train = data.loc[data.index.isin(itr)]
test = data.loc[data.index.isin(ite)]
del data
gc.collect()
if is_sample:
print('*' * 100)
print('Take a random sample of the training data ...')
train = train.sample(frac=SAMPLE_SIZE)
# check to see if feature list exists on disk or not for a particular model
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy')):
feature_list = np.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'))
else:
feature_list = train.columns.tolist()
feature_list = list(set(feature_list) - set(COLS_TO_REMOVE))
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'), feature_list)
# print features with null percentage
print('Top-5 features with highest percentage of null values ...\n')
print((train.loc[:, feature_list].isnull().sum() / len(train)).sort_values(ascending=False).iloc[:5])
# print number of features explored in the experiment
print('*' * 100)
print('Number of features: {}'.format(len(feature_list)))
print('*' * 100)
model_identifier = f'{data_folder}{MODEL_FILENAME}_{fold_indicator}_{SEED}'
if os.path.exists(os.path.join(basepath, output_path + f'{model_identifier}_model.txt')):
print('Loading model from disk ...')
model = lgb.Booster(model_file=os.path.join(basepath, output_path + f'{model_identifier}_model.txt'))
yhold = test.TARGET
hold_preds = np.array(model.predict(test.loc[:, feature_list]))
print('AUC score: {}'.format(roc_auc_score(yhold, hold_preds)))
else:
print('Saving model to disk ...')
# train model
model, feat_df = m.train(train, test, feature_list, is_eval=True, **PARAMS)
if not is_sample:
model.save_model(os.path.join(basepath, output_path + f'{model_identifier}_model.txt'))
if not os.path.exists(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}{fold_indicator}_true_holdout.npy')):
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}{fold_indicator}_true_holdout.npy'), test.TARGET)
hold_preds = model.predict(test.loc[:, feature_list])
np.save(os.path.join(basepath, output_path + f'{model_identifier}_preds_holdout.npy'), hold_preds)
feat_df.to_csv(os.path.join(basepath, output_path + f'{model_identifier}_feat_imp.csv'), index=False)
elif args.cv:
print('Cross validation on training and store parameters and cv score on disk ...')
input_path = args.input_path
output_path = args.output_path
data_folder = args.data_folder
is_sample = args.s
SEED = args.seed
params = {
'input_path': input_path,
'output_path': output_path,
'data_folder': data_folder
}
m = Modelv96(**params)
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}data.h5')):
print('Loading dataset from disk ...')
data = pd.read_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
else:
print('Merge feature groups and save them to disk ...')
train, test = m.merge_datasets()
train, test = m.fe(train, test)
data = pd.concat((train, test))
data = m.reduce_mem_usage(data)
data.to_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
del train, test
gc.collect()
train = data.iloc[:m.n_train]
del data
gc.collect()
if is_sample:
print('*' * 100)
print('Take a random sample of the training data ...')
train = train.sample(frac=SAMPLE_SIZE)
# check to see if feature list exists on disk or not for a particular model
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy')):
feature_list = np.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'))
else:
feature_list = train.columns.tolist()
feature_list = list(set(feature_list) - set(COLS_TO_REMOVE))
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'), feature_list)
PARAMS['seed'] = SEED
PARAMS['feature_fraction_seed'] = SEED
PARAMS['bagging_seed'] = SEED
# cv_adversarial_filepath = os.path.join(basepath, 'data/raw/cv_adversarial_idx_v1.csv')
cv_adversarial_filepath = None
cv_history = m.cross_validate(train, feature_list, PARAMS.copy(), cv_adversarial_filepath)
cv_score = str(cv_history.iloc[-1]['auc-mean']) + '_' + str(cv_history.iloc[-1]['auc-stdv'])
PARAMS['num_boost_round'] = len(cv_history)
print('*' * 100)
print('Best AUC: {}'.format(cv_score))
joblib.dump(PARAMS, os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_params.pkl'))
joblib.dump(cv_score, os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_cv.pkl'))
elif args.oof:
print('Generate oof predictions for train and test set ...')
input_path = args.input_path
output_path = args.output_path
data_folder = args.data_folder
SEED = args.seed
params = {
'input_path': input_path,
'output_path': output_path,
'data_folder': data_folder
}
m = Modelv96(**params)
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}data.h5')):
print('Loading dataset from disk ...')
data = pd.read_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
else:
print('Merge feature groups and save them to disk ...')
train, test = m.merge_datasets()
train, test = m.fe(train, test)
data = pd.concat((train, test))
data = m.reduce_mem_usage(data)
data.to_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
del train, test
gc.collect()
train = data.iloc[:m.n_train]
test = data.iloc[m.n_train:]
del data
gc.collect()
# check to see if feature list exists on disk or not for a particular model
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy')):
feature_list = np.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'))
else:
feature_list = train.columns.tolist()
feature_list = list(set(feature_list) - set(COLS_TO_REMOVE))
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'), feature_list)
PARAMS = joblib.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_params.pkl'))
# model construction
model = lgb.LGBMClassifier(num_leaves=PARAMS['num_leaves'],
max_depth=PARAMS['max_depth'],
learning_rate=PARAMS['learning_rate'],
n_estimators=PARAMS['num_boost_round'],
objective=PARAMS['objective'],
min_child_weight=PARAMS['min_child_weight'],
min_child_samples=PARAMS['min_data_in_leaf'],
subsample=PARAMS['bagging_fraction'],
colsample_bytree=PARAMS['sub_feature'],
reg_lambda=PARAMS['lambda_l2'],
reg_alpha=PARAMS['lambda_l1'],
min_split_gain=PARAMS['min_split_gain'],
random_state=SEED,
verbose=-1,
n_jobs=8
)
oof_preds, test_preds = m.oof_preds(train, test, feature_list, model)
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_oof_preds.npy'), oof_preds)
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_test.npy'), test_preds)
elif args.t:
print('Full Training')
input_path = args.input_path
output_path = args.output_path
data_folder = args.data_folder
CV_SEED = args.cv_seed
SEED = args.seed
params = {
'input_path': input_path,
'output_path': output_path,
'data_folder': data_folder
}
m = Modelv96(**params)
# Load data from disk, or build it and save it
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}data.h5')):
print('Loading dataset from disk ...')
data = pd.read_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
else:
print('Merge feature groups and save them to disk ...')
train, test = m.merge_datasets()
train, test = m.fe(train, test)
data = pd.concat((train, test))
data = m.reduce_mem_usage(data)
data.to_hdf(os.path.join(basepath, output_path + f'{data_folder}data.h5'), format='table', key='data')
del train, test
gc.collect()
# separate out training and test set.
train = data.iloc[:m.n_train]
test = data.iloc[m.n_train:]
# check to see if feature list exists on disk or not for a particular model
if os.path.exists(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy')):
feature_list = np.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'))
else:
feature_list = train.columns.tolist()
feature_list = list(set(feature_list) - set(COLS_TO_REMOVE))
np.save(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_features.npy'), feature_list)
# Load params and holdout score from disk.
PARAMS = joblib.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{CV_SEED}_params.pkl'))
HOLDOUT_SCORE = joblib.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{CV_SEED}_cv.pkl'))
PARAMS['num_boost_round'] = int(1.1 * PARAMS['num_boost_round'])
PARAMS['learning_rate'] /= 1.1
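# NOTE: heuristic for training on all rows with no hold-out to early-stop on --
# use ~10% more boosting rounds at a ~10% smaller learning rate than the CV run
# settled on.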
PARAMS['seed'] = SEED
PARAMS['feature_fraction_seed'] = SEED
PARAMS['bagging_seed'] = SEED
print('*' * 100)
print('PARAMS are: {}'.format(PARAMS))
# train model
model, feat_df = m.train(train, test, feature_list, is_eval=False, **PARAMS)
# evaluation part
preds, score = m.evaluate(test, feature_list, is_eval=False, model=model)
sub_identifier = "%s-%s-%s-%s-%s" % (datetime.now().strftime('%Y%m%d-%H%M'), MODEL_FILENAME, HOLDOUT_SCORE, SEED, data_folder[:-1])
sub = pd.read_csv(os.path.join(basepath, 'data/raw/sample_submission.csv.zip'))
sub['TARGET'] = preds
sub.to_csv(os.path.join(basepath, 'submissions/%s.csv'%(sub_identifier)), index=False)
elif args.ensemble:
input_path = args.input_path
output_path = args.output_path
data_folder = args.data_folder
CV_SEED = args.cv_seed
output_files = []
ensemble_preds = 0
for f in output_files:
sub = pd.read_csv(f)['TARGET'].values
ensemble_preds += sub
ensemble_preds /= len(output_files)
HOLDOUT_SCORE = joblib.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{CV_SEED}_cv.pkl'))
sub_identifier = "%s-%s-%s-%s" % (datetime.now().strftime('%Y%m%d-%H%M'), MODEL_FILENAME, HOLDOUT_SCORE, data_folder[:-1])
sub = pd.read_csv(os.path.join(basepath, 'data/raw/sample_submission.csv.zip'))
sub['TARGET'] = ensemble_preds
sub.to_csv(os.path.join(basepath, 'submissions/ensemble_%s.csv'%(sub_identifier)), index=False)
| 54.063672
| 178
| 0.621604
|
bfe223b8dc7fabe5eb052ff84f4d3f160ebb1ec5
| 1,757
|
py
|
Python
|
termicoder/utils/parse.py
|
apb7/termicoder
|
f0540a2fb4fe0858fa6a9d63e722ad5aa525e14a
|
[
"MIT"
] | 1
|
2018-06-22T06:59:24.000Z
|
2018-06-22T06:59:24.000Z
|
termicoder/utils/parse.py
|
apb7/termicoder
|
f0540a2fb4fe0858fa6a9d63e722ad5aa525e14a
|
[
"MIT"
] | null | null | null |
termicoder/utils/parse.py
|
apb7/termicoder
|
f0540a2fb4fe0858fa6a9d63e722ad5aa525e14a
|
[
"MIT"
] | null | null | null |
import os
import json
import click
import termicoder.utils.display as display
supported_extensions = [".py", ".cpp", ".c", ".java", ".c++", ".cc"]
def get_judge():
problem_file_path = ".problem"
try:
f = open(problem_file_path, "r")
except BaseException:
display.file_read_error(problem_file_path, abort=True)
else:
j = json.load(f)
return j["judge"]
def get_file_name(file):
# the file may be a str or a click File object; return the file name in either case
if(not isinstance(file, str)):
return str(file.name)
else:
return str(file)
# this helps in reducing time by intelligently picking a default
def get_code_file():
probable_files = []
for f in os.listdir(os.getcwd()):
if(os.path.isfile(f) and os.path.splitext(f)[1] in supported_extensions):
probable_files.append(f)
default_file = None
if(probable_files):
default_file = probable_files[0]
# defaulting to latest file
for f in probable_files:
if(os.path.getmtime(f) > os.path.getmtime(default_file)):
default_file = f
code_file = click.prompt('Please provide a code file', type=click.File(),
default=default_file)
return code_file
def get_time_limit():
time_limit = None
problem_file_path = ".problem"
try:
f = open(problem_file_path, "r")
except BaseException:
pass
else:
j = json.load(f)
try:
time_limit = j["max_timelimit"]
except BaseException:
pass
if(time_limit is None):
return 3.0
else:
return float(time_limit)
def get_memory_limit():
click.echo("memory_limit not implemented in this version")
| 23.426667
| 81
| 0.622652
|
12a4cdb9de3bde812b09c5f54bac42e1d4b154b4
| 5,320
|
py
|
Python
|
eucaconsole/forms/securitygroups.py
|
gholms/eucaconsole
|
4629c961c90e3aae27e3a869a7f157bafeda6489
|
[
"BSD-2-Clause"
] | null | null | null |
eucaconsole/forms/securitygroups.py
|
gholms/eucaconsole
|
4629c961c90e3aae27e3a869a7f157bafeda6489
|
[
"BSD-2-Clause"
] | null | null | null |
eucaconsole/forms/securitygroups.py
|
gholms/eucaconsole
|
4629c961c90e3aae27e3a869a7f157bafeda6489
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2013-2016 Hewlett Packard Enterprise Development LP
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Forms for Security Groups
"""
import wtforms
from wtforms import validators
from ..i18n import _
from ..views import BaseView
from . import BaseSecureForm, ChoicesManager, TextEscapedField, ASCII_WITHOUT_SLASHES_NOTICE
class SecurityGroupForm(BaseSecureForm):
"""Security Group create/edit form
Constraints for VPC security group name/desc: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$*
See http://docs.aws.amazon.com/cli/latest/reference/ec2/create-security-group.html
"""
name_error_msg = ASCII_WITHOUT_SLASHES_NOTICE
name = wtforms.TextField(
label=_(u'Name'),
validators=[validators.DataRequired(message=name_error_msg)],
)
sgroup_description_pattern = r'^[a-zA-Z0-9\s\._\-:\/\(\)#,@\[\]\+=;\{\}!\$\*]+$'
DESCRIPTION_RESTRICTION_NOTICE = _(
u'Description is required, must be between 1 and 255 characters, and may only contain '
u'letters, numbers, spaces, and the following characters:')
special_chars = u'._-:/()#,@[]+=;{}!$*'
desc_error_msg = u'{0} {1}'.format(DESCRIPTION_RESTRICTION_NOTICE, special_chars)
description = wtforms.TextAreaField(
label=_(u'Description'),
validators=[
validators.DataRequired(message=desc_error_msg),
validators.Length(max=255, message=_(u'Description must be less than 255 characters'))
],
)
securitygroup_vpc_network = wtforms.SelectField(label=_(u'VPC network'))
def __init__(self, request, vpc_conn=None, security_group=None, **kwargs):
super(SecurityGroupForm, self).__init__(request, **kwargs)
self.vpc_conn = vpc_conn
self.name.error_msg = self.name_error_msg # Used for Foundation Abide error message
self.description.error_msg = self.desc_error_msg # Used for Foundation Abide error message
self.vpc_choices_manager = ChoicesManager(conn=vpc_conn)
self.cloud_type = request.session.get('cloud_type', 'euca')
from ..views import BaseView
self.is_vpc_supported = BaseView.is_vpc_supported(request)
self.set_vpc_choices()
# Although we don't need to show the name/desc fields on update, we need these here to ensure the form is valid
if security_group is not None:
self.name.data = security_group.name
self.description.data = security_group.description
self.securitygroup_vpc_network.data = security_group.vpc_id or ''
def set_vpc_choices(self):
if self.cloud_type == 'euca' and self.is_vpc_supported:
self.securitygroup_vpc_network.choices = self.vpc_choices_manager.vpc_networks(add_blank=False)
else:
self.securitygroup_vpc_network.choices = self.vpc_choices_manager.vpc_networks()
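# Illustrative sketch (not part of the form): a quick check of what the
# description pattern defined above accepts and rejects. The sample strings
# are made up for demonstration.
import re

def _demo_description_pattern():
    desc_re = re.compile(SecurityGroupForm.sgroup_description_pattern)
    assert desc_re.match(u'web tier: allows 80/443 (public)') is not None
    assert desc_re.match(u'bad "quotes" here') is None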
class SecurityGroupDeleteForm(BaseSecureForm):
"""Security Group deletion form.
Only need to initialize as a secure form to generate CSRF token
"""
pass
class SecurityGroupsFiltersForm(BaseSecureForm):
"""Form class for filters on landing page"""
vpc_id = wtforms.SelectMultipleField(label=_(u'VPC network'))
tags = TextEscapedField(label=_(u'Tags'))
def __init__(self, request, vpc_conn=None, cloud_type='euca', **kwargs):
super(SecurityGroupsFiltersForm, self).__init__(request, **kwargs)
self.request = request
self.cloud_type = cloud_type
self.vpc_choices_manager = ChoicesManager(conn=vpc_conn)
self.vpc_id.choices = self.vpc_choices_manager.vpc_networks(add_blank=False)
if self.cloud_type == 'aws':
self.vpc_id.choices.append(('None', _(u'No VPC')))
self.vpc_id.choices = sorted(self.vpc_id.choices)
self.facets = []
if BaseView.is_vpc_supported(request):
self.facets.append(
{'name': 'vpc_id', 'label': self.vpc_id.label.text,
'options': self.get_options_from_choices(self.vpc_id.choices)}
)
| 46.26087
| 119
| 0.709962
|
578554499d010d3b09aa6ec6fe400fa5275b5c86
| 6,874
|
py
|
Python
|
datadog_checks_dev/datadog_checks/dev/tooling/config_validator/validator.py
|
tanner-bruce/integrations-core
|
36337b84fefb73e94d4f1ee28aaeb669dc12fb59
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/config_validator/validator.py
|
tanner-bruce/integrations-core
|
36337b84fefb73e94d4f1ee28aaeb669dc12fb59
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/config_validator/validator.py
|
tanner-bruce/integrations-core
|
36337b84fefb73e94d4f1ee28aaeb669dc12fb59
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .config_block import ConfigBlock
from .utils import get_end_of_part, get_indent, is_at_least_indented, is_blank
from .validator_errors import SEVERITY_WARNING, ValidatorError
def validate_config(config):
"""Function used to validate a whole yaml configuration file. Will check if there are both the init_config and
the instances sections. And will parse using the _parse_for_config_blocks function
"""
errors = []
blocks = [] # This will store ConfigBlocks as a tree
config_lines = config.split('\n')
init_config_line = -1
instances_line = -1
for i, line in enumerate(config_lines):
if line.startswith("init_config:"):
init_config_line = i
if line != "init_config:":
errors.append(ValidatorError("Expected no data after ':'", i, SEVERITY_WARNING))
if line.startswith("instances:"):
instances_line = i
if line != "instances:":
errors.append(ValidatorError("Expected no data after ':'", i, SEVERITY_WARNING))
if init_config_line == -1:
errors.append(ValidatorError("Missing `init_config` section", None))
return errors
if instances_line == -1:
errors.append(ValidatorError("Missing `instances` section", None))
return errors
# parse init_config data
blocks.append(_parse_init_config(config_lines, init_config_line, errors))
# parse instances data
instances_end = get_end_of_part(config_lines, instances_line)
if instances_end is None:
errors.append(ValidatorError("Malformed file, cannot find end of part 'instances'", instances_line))
return errors
blocks.append(_parse_for_config_blocks(config_lines, instances_line + 1, instances_end, errors))
_check_no_duplicate_names(blocks, errors)
_validate_blocks(blocks, errors)
return errors
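# Illustrative usage sketch: validate_config takes the raw text of an example
# config and returns a list of ValidatorError objects. The config string below
# is a made-up minimal example, not taken from any integration.
_SAMPLE_CONFIG = """init_config:
instances:
  - host: localhost
"""

def _demo_validate_config():
    return validate_config(_SAMPLE_CONFIG)  # -> list of ValidatorError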
def _parse_init_config(config_lines, init_config_start_line, errors):
"""Function used to parse the init_config section and return the list of 'ConfigBlock'
It first checks if the section contains data or not. If not, it returns an empty list. Otherwise
it will use the _parse_for_config_blocks function to parse it between the beginning and the end of the part
"""
blocks = []
idx = init_config_start_line + 1
# Check if the init_config part contains data or not
while idx < len(config_lines):
current_line = config_lines[idx]
if is_blank(current_line):
idx += 1
continue
elif is_at_least_indented(current_line, 1):
# There is data in 'init_config'
break
else:
# There is no data, do not try to parse the init_config
return blocks
end = get_end_of_part(config_lines, init_config_start_line)
if end is None:
errors.append(ValidatorError("Malformed file, cannot find end of part 'init_config'", init_config_start_line))
return blocks
return _parse_for_config_blocks(config_lines, init_config_start_line + 1, end, errors)
def _parse_for_config_blocks(config_lines, start, end, errors):
"""The function basically do all the work. It reads the config from start, removes blank lines first then when it first
sees data, it sets the 'indent' variable once for all. All blocks read in a given function call must have the same
indentation. Sub-blocks are parsed recursively and thus the 'indent' variable is given a new value.
Once a block is parsed the function will either recurse if the block requires it (see ConfigBlock), or it will go
to the next block and iterate.
"""
idx = start
blocks = []
# Go to the first line with data (see 'is_blank')
while idx < end:
if is_blank(config_lines[idx]):
idx += 1
continue
break
else:
return blocks
# All blocks of the same level must have the same indentation. Let's use the first one to compare them
indent = get_indent(config_lines[idx])
while idx < end:
current_line = config_lines[idx]
if is_blank(current_line):
idx += 1
continue
if not is_at_least_indented(current_line, indent):
errors.append(ValidatorError("Content is not correctly indented - skipping rest of file", idx))
# File will not be able to be parsed correctly if indentation is wrong
return blocks
cfg_block = ConfigBlock.parse_from_strings(idx, config_lines, indent, errors)
# Even if there has been an issue when parsing the block, cfg_block.length always point to another block
# (either a sub-block or not) or to EOF
idx += cfg_block.length
blocks.append(cfg_block)
if cfg_block.should_recurse:
# new_end points to the next line having the same indent as the cfg_block
new_end = get_end_of_part(config_lines, idx, indent=indent)
if new_end is None:
block_name = cfg_block.param_prop.var_name if cfg_block.param_prop else "?"
err_string = "The object {} cannot be parsed correctly, check indentation".format(block_name)
errors.append(ValidatorError(err_string, idx))
return blocks
if new_end > end:
new_end = end
blocks += _parse_for_config_blocks(config_lines, idx, new_end, errors)
idx = new_end
return blocks
def _check_no_duplicate_names(blocks, errors):
"""blocks contains ConfigBlocks as a tree. This function makes sure that each yaml object has no duplicates
variables and return a list of errors to be displayed if duplicates are found. The @param declaration needs to
be there for this to correctly identify a variable.
"""
same_level_blocks = [b for b in blocks if isinstance(b, ConfigBlock)]
names_list = [b.param_prop.var_name for b in same_level_blocks if b.param_prop]
duplicates = set([x for x in names_list if names_list.count(x) > 1])
for dup in duplicates:
errors.append(ValidatorError("Duplicate variable with name {}".format(dup), None))
sub_lists_of_other_blocks = [b for b in blocks if isinstance(b, list)]
for l in sub_lists_of_other_blocks:
_check_no_duplicate_names(l, errors)
def _validate_blocks(blocks, errors):
"""blocks contains ConfigBlocks as a tree. This function iterate over it to run the validate method on each
ConfigBlock and append errors to the provided array if needed.
"""
leaves = [b for b in blocks if isinstance(b, ConfigBlock)]
for b in leaves:
b.validate(errors)
nodes = [b for b in blocks if isinstance(b, list)]
for n in nodes:
_validate_blocks(n, errors)
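# --- Hedged usage sketch (illustration only, not part of the validator) ---
# Assuming `config_lines` holds the lines of an example config file, the helpers
# above would typically be driven like this (the file name is a placeholder):
#
#     errors = []
#     with open("conf.yaml.example") as f:
#         config_lines = f.read().splitlines()
#     blocks = _parse_for_config_blocks(config_lines, 0, len(config_lines), errors)
#     _check_no_duplicate_names(blocks, errors)
#     _validate_blocks(blocks, errors)
#     for err in errors:
#         print(err)
#
# The real entry point (whose tail is shown above) additionally locates the
# `init_config` and `instances` sections before delegating to these helpers.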
| 42.695652
| 123
| 0.686063
|
2208af44a2e6f99d8e95d530a79e85e70dfd47c2
| 3,373
|
py
|
Python
|
Trakttv.bundle/Contents/Libraries/Shared/trakt/objects/season.py
|
disrupted/Trakttv.bundle
|
24712216c71f3b22fd58cb5dd89dad5bb798ed60
|
[
"RSA-MD"
] | 1,346
|
2015-01-01T14:52:24.000Z
|
2022-03-28T12:50:48.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/trakt/objects/season.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 474
|
2015-01-01T10:27:46.000Z
|
2022-03-21T12:26:16.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/trakt/objects/season.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 191
|
2015-01-02T18:27:22.000Z
|
2022-03-29T10:49:48.000Z
|
from trakt.core.helpers import to_iso8601_datetime, from_iso8601_datetime, deprecated
from trakt.objects.core.helpers import update_attributes
from trakt.objects.media import Media
class Season(Media):
def __init__(self, client, keys=None, index=None):
super(Season, self).__init__(client, keys, index)
self.show = None
"""
:type: :class:`trakt.objects.show.Show`
Show
"""
self.episodes = {}
"""
:type: :class:`~python:dict`
Episodes, defined as :code:`{episode_num: Episode}`
**Note:** this field might not be available with some methods
"""
self.first_aired = None
"""
:type: :class:`~python:datetime.datetime`
First air date
"""
self.episode_count = None
"""
:type: :class:`~python:int`
Total episode count
"""
self.aired_episodes = None
"""
:type: :class:`~python:int`
Aired episode count
"""
def to_identifier(self):
"""Returns the season identifier which is compatible with requests that require
season definitions.
:return: Season identifier/definition
:rtype: :class:`~python:dict`
"""
return {
'number': self.pk,
'episodes': [
episode.to_dict()
for episode in self.episodes.values()
]
}
@deprecated('Season.to_info() has been moved to Season.to_dict()')
def to_info(self):
"""**Deprecated:** use the :code:`to_dict()` method instead"""
return self.to_dict()
def to_dict(self):
"""Dump season to a dictionary
:return: Season dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the season identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.episode_count:
result['episode_count'] = self.episode_count
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result
def _update(self, info=None, **kwargs):
if not info:
return
super(Season, self)._update(info, **kwargs)
update_attributes(self, info, [
# Extended Info
'episode_count',
'aired_episodes'
])
# Extended Info
if 'first_aired' in info:
self.first_aired = from_iso8601_datetime(info.get('first_aired'))
@classmethod
def _construct(cls, client, keys, info=None, index=None, **kwargs):
season = cls(client, keys, index=index)
season._update(info, **kwargs)
return season
def __repr__(self):
if self.show:
return '<Season %r - S%02d>' % (self.show.title, self.pk)
return '<Season S%02d>' % self.pk
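# Hypothetical illustration (not part of the library): for a season parsed from the
# Trakt API, Season.to_dict() produces a structure roughly like the one below, with
# the 'ids' mapping built from keys[1:] and the extended fields present only when
# they were returned by the API:
#
#     {
#         'number': 1,
#         'episodes': [...],                           # one Episode.to_dict() per episode
#         'ids': {'trakt': 1, 'tvdb': 2},              # example identifiers
#         'in_watchlist': 0,
#         'first_aired': '2010-01-01T00:00:00.000Z',
#         'episode_count': 10,
#         'aired_episodes': 10
#     }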
| 26.147287
| 104
| 0.565372
|
d3a091a1f07abe82eb3fb181f720fb9da96dfac2
| 7,302
|
py
|
Python
|
build/lib/scripts/database_tools.py
|
nicolas998/ifis_tools
|
f7b06473a916324fc37937bc5e9034cc57bc1623
|
[
"MIT"
] | 3
|
2019-09-05T14:47:02.000Z
|
2021-11-12T15:31:56.000Z
|
build/lib/scripts/database_tools.py
|
nicolas998/ifis_tools
|
f7b06473a916324fc37937bc5e9034cc57bc1623
|
[
"MIT"
] | 2
|
2019-11-13T21:36:22.000Z
|
2019-12-16T21:16:43.000Z
|
build/lib/scripts/database_tools.py
|
nicolas998/ifis_tools
|
f7b06473a916324fc37937bc5e9034cc57bc1623
|
[
"MIT"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# formats: jupyter_scripts//ipynb,scripts//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # database_tools:
#
# Set of tools to connect to the data base, put and get data from them.
import psycopg2
from psycopg2 import sql
import pandas as pd
from datetime import datetime
import numpy as np
import auxiliar as aux
# +
def DataBaseConnect(user = "iihr_student", password = "iihr.student", host = "s-iihr51.iihr.uiowa.edu",
port = "5435", database = "research_environment"):
    '''Connect to the database that has the stored USGS information'''
con = psycopg2.connect(user = user,
password = password,
host = host,
port = port,
database = database)
return con
def SQL_read_USGS_Streamflow(usgs_id, date1, date2, schema = 'pers_nico',
table = 'data_usgs', time_name = 'unix_time', data_name = 'val', usgs_name = 'usgs_id'):
'''Read streamflow data from IIHR database "research_environment"
and returns it as a pandas.DataFrame element.
Parameters:
- usgs_id: code of the usgs.
- date1: initial date of the query.
- date2: final date of the query.
Optional:
        - schema: where to obtain data in the database.
- table: master table with the usgs data.
- time_name: the name of the column that has the time.
- data_name: the name of the column that has the data.
- usgs_name: the name of the column that has the id of the usgs stations.
Returns:
- pandas.DataFrame containing the streamflow data.'''
#make the connection
con = DataBaseConnect(user = 'nicolas', password = '10A28Gir0')
#Work with dates and usgs id
date1 = str(aux.__datetime2unix__(date1))
date2 = str(aux.__datetime2unix__(date2))
if type(usgs_id) is not str:
usgs_id = str(usgs_id)
    # build the query
query = sql.SQL("SELECT "+time_name+", "+data_name+" FROM "+schema+"."+table+" WHERE "+time_name+" BETWEEN "+date1+" and "+date2+" AND "+usgs_name+"='"+usgs_id+"'")
    # Execute the query.
Data = pd.read_sql(query, con, index_col='unix_time',parse_dates={'unix_time':{'unit':'s'}})
con.close()
return Data
#SQL Query to obtain the data from per_felipe.pois_adv_geom
def SQL_USGS_at_IFIS():
'''Return the list of the usgs stations in the IFIS system and the linkID where they
belong.'''
#make the connection
con = DataBaseConnect(user = 'nicolas', password = '10A28Gir0')
#Query for the stations
query = sql.SQL("SELECT foreign_id,link_id FROM pers_felipe.pois_adv_geom where type in (2,3) and foreign_id like '0%' AND link_id < 620000")
    # execute the query
cur = con.cursor()
cur.execute(query)
L = cur.fetchall()
cur.close()
con.close()
#Obtains a dictionary in which stations are the key
DicUSGSinIFIS = {}
for l in L:
DicUSGSinIFIS.update({l[0]:l[1]})
return DicUSGSinIFIS
def SQL_USGS_at_MATC():
    '''Return the list of stations that are in the database pers_nico (matc).'''
#make the connection
con = DataBaseConnect(user = 'nicolas', password = '10A28Gir0')
#Make the query
query = sql.SQL("SELECT DISTINCT(usgs_id) FROM pers_nico.data_usgs_2008")
cur = con.cursor()
cur.execute(query)
L = cur.fetchall()
cur.close()
con.close()
return [l[0] for l in L]
def SQL_Get_linkArea(linkID):
    '''Obtain the upstream area for a link ID'''
    # Build and execute the query
con = DataBaseConnect('nicolas','10A28Gir0')
cur = con.cursor()
q = sql.SQL("SELECT upstream_area FROM pers_felipe.pois_adv_geom WHERE link_id = "+str(linkID))
cur.execute(q)
A = cur.fetchall()
cur.close()
con.close()
return A[0][0]*2.583
def SQL_Read_MeanRainfall(link_id, date1, date2, schema = 'pers_nico',
table = 's4mrain', time_name = 'unix_time', data_name = 'rain', linkid_name = 'link_id'):
    '''Read hourly mean rainfall data for a link from the IIHR database "research_environment"
    and return it as a pandas.Series element.
    Parameters:
        - link_id: id of the link.
        - date1: initial date of the query.
        - date2: final date of the query.
    Optional:
        - schema: where to obtain data in the database.
        - table: master table with the rainfall data.
        - time_name: the name of the column that has the time.
        - data_name: the name of the column that has the data.
        - linkid_name: the name of the column that has the id of the links.
    Returns:
        - pandas.Series containing the hourly rainfall data.'''
#make the connection
con = DataBaseConnect(user = 'nicolas', password = '10A28Gir0')
    # Work with dates and link id
date1 = str(aux.__datetime2unix__(date1))
date2 = str(aux.__datetime2unix__(date2))
if type(link_id) is not str:
link_id = str(link_id)
    # build the query
query = sql.SQL("SELECT "+time_name+", "+data_name+" FROM "+schema+"."+table+" WHERE "+time_name+" BETWEEN "+date1+" and "+date2+" AND "+linkid_name+"='"+link_id+"'")
    # Execute the query.
Data = pd.read_sql(query, con, index_col='unix_time',parse_dates={'unix_time':{'unit':'s'}})
con.close()
#Organize rainfall
Data = Data.sort_index()
Dates = pd.date_range(Data.index[0], Data.index[-1], freq='1h')
Rain = pd.Series(np.zeros(Dates.size), Dates)
Rain[Data.index] = Data['rain'].values
Rain[Rain>1000] = 0.0
return Rain
def SQL_Get_MeanRainfall(linkID, date1, date2):
'''Obtains the mean rainfall for the watershed associated to
a given linkID.
Parameters:
- linkID: linkID of the outlet of the basin.
- date1: initial date (YYYY-MM-DD HH:MM).
- date2: end date (YYYY-MM-DD HH:MM).
Returns:
- Rainfall: Pandas series with the mean rainfall in the basin.'''
    # Set up the connection
con = DataBaseConnect(user='nicolas', password='10A28Gir0', database='rt_precipitation')
#Transform dates to unix
unix1 = str(aux.__datetime2unix__(date1))
unix2 = str(aux.__datetime2unix__(date2))
linkID = str(linkID)
    # Set the query and obtain the data
q = sql.SQL("WITH subbasin AS (SELECT nodeX.link_id AS link_id FROM students.env_master_km AS nodeX, students.env_master_km AS parentX WHERE (nodeX.left BETWEEN parentX.left AND parentX.right) AND parentX.link_id = "+str(linkID)+"), uparea as (SELECT up_area FROM students.env_master_km WHERE link_id= "+str(linkID)+"), lut as (SELECT x, y FROM env_lookup_hrap_lid_v4 WHERE link_id IN (SELECT * FROM subbasin) group by x, y) SELECT unix_time, sum(val)/(SELECT count(*) FROM lut) as rain FROM stage_4.data WHERE grid_x IN (SELECT x FROM lut) AND grid_y IN (SELECT y from lut) AND unix_time between "+unix1+" AND "+unix2+" group by unix_time order by unix_time;")
Data = pd.read_sql(q, con, index_col='unix_time',parse_dates={'unix_time':{'unit':'s'}})
#close connection
con.close()
    # Post-process the data
dates = pd.date_range(date1, date2, freq='1h')
Rain = pd.Series(np.zeros(dates.size), dates)
Rain[Data.index] = Data['rain']
return Rain
# -
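# Hedged usage sketch (illustration only; the gauge id, link id and the database
# credentials baked into the functions above are assumptions):
#
#     flow = SQL_read_USGS_Streamflow('05454500', '2016-01-01 00:00', '2016-12-31 23:00')
#     rain = SQL_Get_MeanRainfall(367813, '2016-01-01 00:00', '2016-12-31 23:00')
#     area = SQL_Get_linkArea(367813)
#
# `flow` comes back as a pandas.DataFrame and `rain` as an hourly pandas.Series,
# both indexed by time, so they can be resampled or plotted directly.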
| 40.793296
| 665
| 0.665845
|
28174f965e0920e05ef5a65c29d714d927e3e844
| 11,416
|
py
|
Python
|
tests/test_gpu_aamp.py
|
profintegra/stumpy
|
66b3402d91820005b466e1da6fe353b61e6246c5
|
[
"BSD-3-Clause"
] | 1
|
2021-07-27T11:14:01.000Z
|
2021-07-27T11:14:01.000Z
|
tests/test_gpu_aamp.py
|
profintegra/stumpy
|
66b3402d91820005b466e1da6fe353b61e6246c5
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_gpu_aamp.py
|
profintegra/stumpy
|
66b3402d91820005b466e1da6fe353b61e6246c5
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import gpu_aamp
from stumpy import config
from numba import cuda
import pytest
import naive
config.THREADS_PER_BLOCK = 10
if not cuda.is_available():
pytest.skip("Skipping Tests No GPUs Available", allow_module_level=True)
test_data = [
(
np.array([9, 8100, -60, 7], dtype=np.float64),
np.array([584, -11, 23, 79, 1001, 0, -19], dtype=np.float64),
),
(
np.random.uniform(-1000, 1000, [8]).astype(np.float64),
np.random.uniform(-1000, 1000, [64]).astype(np.float64),
),
]
window_size = [8, 16, 32]
substitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]
substitution_values = [np.nan, np.inf]
def test_gpu_aamp_int_input():
with pytest.raises(TypeError):
gpu_aamp(np.arange(10), 5, ignore_trivial=True)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_gpu_aamp_self_join(T_A, T_B):
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_B, m, exclusion_zone=zone)
comp_mp = gpu_aamp(T_B, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
comp_mp = gpu_aamp(pd.Series(T_B), m, ignore_trivial=True)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("m", window_size)
def test_gpu_aamp_self_join_larger_window(T_A, T_B, m):
if len(T_B) > m:
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_B, m, exclusion_zone=zone)
comp_mp = gpu_aamp(T_B, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_aamp(
# pd.Series(T_B),
# m,
# ignore_trivial=True,
# )
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_gpu_aamp_A_B_join(T_A, T_B):
m = 3
ref_mp = naive.aamp(T_B, m, T_B=T_A)
comp_mp = gpu_aamp(T_B, m, T_A, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_aamp(pd.Series(T_B), m, pd.Series(T_A), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_parallel_gpu_aamp_self_join(T_A, T_B):
device_ids = [device.id for device in cuda.list_devices()]
if len(T_B) > 10:
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_B, m, exclusion_zone=zone)
comp_mp = gpu_aamp(
T_B,
m,
ignore_trivial=True,
device_id=device_ids,
)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_aamp(
# pd.Series(T_B),
# m,
# ignore_trivial=True,
# device_id=device_ids,
# )
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.parametrize("T_A, T_B", test_data)
def test_parallel_gpu_aamp_A_B_join(T_A, T_B):
device_ids = [device.id for device in cuda.list_devices()]
if len(T_B) > 10:
m = 3
ref_mp = naive.aamp(T_B, m, T_B=T_A)
comp_mp = gpu_aamp(
T_B,
m,
T_A,
ignore_trivial=False,
device_id=device_ids,
)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_aamp(
# pd.Series(T_B),
# m,
# pd.Series(T_A),
# ignore_trivial=False,
# device_id=device_ids,
# )
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
def test_gpu_aamp_constant_subsequence_self_join():
T_A = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(5, dtype=np.float64)))
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_A, m, exclusion_zone=zone)
comp_mp = gpu_aamp(T_A, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# comp_mp = gpu_aamp(pd.Series(T_A), m, ignore_trivial=True)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
def test_gpu_aamp_one_constant_subsequence_A_B_join():
T_A = np.random.rand(20)
T_B = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(5, dtype=np.float64)))
m = 3
ref_mp = naive.aamp(T_B, m, T_B=T_A)
comp_mp = gpu_aamp(T_B, m, T_A, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# comp_mp = gpu_aamp(pd.Series(T_B), m, pd.Series(T_A), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# Swap inputs
ref_mp = naive.aamp(T_A, m, T_B=T_B)
comp_mp = gpu_aamp(T_A, m, T_B, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# comp_mp = gpu_aamp(pd.Series(T_A), m, pd.Series(T_B), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
def test_gpu_aamp_two_constant_subsequences_A_B_join():
T_A = np.array([0, 0, 0, 0, 0, 1], dtype=np.float64)
T_B = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(5, dtype=np.float64)))
m = 3
ref_mp = naive.aamp(T_B, m, T_B=T_A)
comp_mp = gpu_aamp(T_B, m, T_A, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# comp_mp = gpu_aamp(pd.Series(T_B), m, pd.Series(T_A), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# Swap inputs
ref_mp = naive.aamp(T_A, m, T_B=T_B)
comp_mp = gpu_aamp(T_A, m, T_B, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
# comp_mp = gpu_aamp(pd.Series(T_A), m, pd.Series(T_B), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp[:, 0], comp_mp[:, 0]) # ignore indices
def test_gpu_aamp_identical_subsequence_self_join():
identical = np.random.rand(8)
T_A = np.random.rand(20)
T_A[1 : 1 + identical.shape[0]] = identical
T_A[11 : 11 + identical.shape[0]] = identical
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_A, m, exclusion_zone=zone)
comp_mp = gpu_aamp(T_A, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(
ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
) # ignore indices
# comp_mp = gpu_aamp(pd.Series(T_A), m, ignore_trivial=True)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(
# ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
# ) # ignore indices
def test_gpu_aamp_identical_subsequence_A_B_join():
identical = np.random.rand(8)
T_A = np.random.rand(20)
T_B = np.random.rand(20)
T_A[1 : 1 + identical.shape[0]] = identical
T_B[11 : 11 + identical.shape[0]] = identical
m = 3
ref_mp = naive.aamp(T_A, m, T_B=T_B)
comp_mp = gpu_aamp(T_A, m, T_B, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(
ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
) # ignore indices
# comp_mp = gpu_aamp(pd.Series(T_A), m, pd.Series(T_B), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(
# ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
# ) # ignore indices
# Swap inputs
ref_mp = naive.aamp(T_B, m, T_B=T_A)
comp_mp = gpu_aamp(T_B, m, T_A, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(
ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
) # ignore indices
# comp_mp = gpu_aamp(pd.Series(T_B), m, pd.Series(T_A), ignore_trivial=False)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(
# ref_mp[:, 0], comp_mp[:, 0], decimal=config.STUMPY_TEST_PRECISION
# ) # ignore indices
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("substitute_B", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_gpu_aamp_nan_inf_self_join(T_A, T_B, substitute_B, substitution_locations):
m = 3
stop = 16
T_B_sub = T_B.copy()[:stop]
for substitution_location_B in substitution_locations:
T_B_sub[:] = T_B[:stop]
T_B_sub[substitution_location_B] = substitute_B
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T_B_sub, m, exclusion_zone=zone)
comp_mp = gpu_aamp(T_B_sub, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_aamp(pd.Series(T_B_sub), m, ignore_trivial=True)
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
@pytest.mark.parametrize("T_A, T_B", test_data)
@pytest.mark.parametrize("substitute_A", substitution_values)
@pytest.mark.parametrize("substitute_B", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_gpu_aamp_nan_inf_A_B_join(
T_A, T_B, substitute_A, substitute_B, substitution_locations
):
m = 3
stop = 16
T_A_sub = T_A.copy()
T_B_sub = T_B.copy()[:stop]
for substitution_location_B in substitution_locations:
for substitution_location_A in substitution_locations:
T_A_sub[:] = T_A
T_B_sub[:] = T_B[:stop]
T_A_sub[substitution_location_A] = substitute_A
T_B_sub[substitution_location_B] = substitute_B
ref_mp = naive.aamp(T_B_sub, m, T_B=T_A_sub)
comp_mp = gpu_aamp(T_B_sub, m, T_A_sub, ignore_trivial=False)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
# comp_mp = gpu_aamp(
# pd.Series(T_B_sub), m, pd.Series(T_A_sub), ignore_trivial=False
# )
# naive.replace_inf(comp_mp)
# npt.assert_almost_equal(ref_mp, comp_mp)
def test_gpu_aamp_nan_zero_mean_self_join():
T = np.array([-1, 0, 1, np.inf, 1, 0, -1])
m = 3
zone = int(np.ceil(m / 4))
ref_mp = naive.aamp(T, m, exclusion_zone=zone)
comp_mp = gpu_aamp(T, m, ignore_trivial=True)
naive.replace_inf(ref_mp)
naive.replace_inf(comp_mp)
npt.assert_almost_equal(ref_mp, comp_mp)
| 34.077612
| 88
| 0.656184
|
79ef4a5fd7f4f90112e6a20dcba81e0008826c1f
| 2,260
|
py
|
Python
|
homeassistant/components/switch/litejet.py
|
sara0871/laughing--barnacle-
|
70412fc0ba42ccfe446c0c62e327eceeda56a2ab
|
[
"Apache-2.0"
] | 7
|
2018-08-03T10:15:36.000Z
|
2019-03-25T13:31:55.000Z
|
homeassistant/components/switch/litejet.py
|
sara0871/https-wakatime.com-android-studio
|
5a15b2c036b332c17d5f6a06664378e9273d684f
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:06:43.000Z
|
2022-03-12T00:56:04.000Z
|
homeassistant/components/switch/litejet.py
|
sara0871/https-wakatime.com-android-studio
|
5a15b2c036b332c17d5f6a06664378e9273d684f
|
[
"Apache-2.0"
] | 3
|
2018-12-04T11:54:27.000Z
|
2019-08-31T14:41:32.000Z
|
"""
Support for LiteJet switch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.litejet/
"""
import logging
from homeassistant.components import litejet
from homeassistant.components.switch import SwitchDevice
DEPENDENCIES = ['litejet']
ATTR_NUMBER = 'number'
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the LiteJet switch platform."""
litejet_ = hass.data['litejet_system']
devices = []
for i in litejet_.button_switches():
name = litejet_.get_switch_name(i)
if not litejet.is_ignored(hass, name):
devices.append(LiteJetSwitch(hass, litejet_, i, name))
add_devices(devices, True)
class LiteJetSwitch(SwitchDevice):
"""Representation of a single LiteJet switch."""
def __init__(self, hass, lj, i, name):
"""Initialize a LiteJet switch."""
self._hass = hass
self._lj = lj
self._index = i
self._state = False
self._name = name
lj.on_switch_pressed(i, self._on_switch_pressed)
lj.on_switch_released(i, self._on_switch_released)
def _on_switch_pressed(self):
_LOGGER.debug("Updating pressed for %s", self._name)
self._state = True
self.schedule_update_ha_state()
def _on_switch_released(self):
_LOGGER.debug("Updating released for %s", self._name)
self._state = False
self.schedule_update_ha_state()
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return if the switch is pressed."""
return self._state
@property
def should_poll(self):
"""Return that polling is not necessary."""
return False
@property
def device_state_attributes(self):
"""Return the device-specific state attributes."""
return {
ATTR_NUMBER: self._index
}
def turn_on(self, **kwargs):
"""Press the switch."""
self._lj.press_switch(self._index)
def turn_off(self, **kwargs):
"""Release the switch."""
self._lj.release_switch(self._index)
| 26.904762
| 74
| 0.652655
|
6ebe95d8e08d07c2d56895a42ec83c841e448ef8
| 3,544
|
py
|
Python
|
ayesaac/services/natural_language_understanding/main.py
|
F21CA-group-6/aye-saac
|
0b1b4d1365b84c80047796a05c10dcfe9c8e921a
|
[
"BSD-3-Clause"
] | 1
|
2021-03-08T13:24:59.000Z
|
2021-03-08T13:24:59.000Z
|
ayesaac/services/natural_language_understanding/main.py
|
F21CA-group-6/aye-saac
|
0b1b4d1365b84c80047796a05c10dcfe9c8e921a
|
[
"BSD-3-Clause"
] | null | null | null |
ayesaac/services/natural_language_understanding/main.py
|
F21CA-group-6/aye-saac
|
0b1b4d1365b84c80047796a05c10dcfe9c8e921a
|
[
"BSD-3-Clause"
] | 1
|
2021-03-11T13:54:00.000Z
|
2021-03-11T13:54:00.000Z
|
from os import listdir
from os.path import isdir, join
from rasa.nlu.model import Interpreter
from ayesaac.services.common import QueueManager
from ayesaac.utils.config import Config
from ayesaac.utils.logger import get_logger
config = Config()
logger = get_logger(__file__)
def contains_word(s, w):
"""
Checks whether a string contains a certain word
"""
return f" {w} " in f" {s} "
def contains_at_least_one_word(s, arr):
"""
Checks whether a string contains at least one word coming from an array of words
"""
for elem in arr:
if contains_word(s, elem):
return True
return False
def check_followup(query):
"""
    Checks whether the query is a follow-up query, i.e. whether the entities found in
    the previous query should be added to the current one.
"""
if contains_at_least_one_word(
# query, ["it", "that", "this", "them", "they", "those", "these"]
query, ["it", "that", "them", "they", "those", "these"]
):
return True
return False
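# Minimal illustration of the helpers above (assumed inputs, not part of the service):
#   contains_word("is it ready", "it")        -> True
#   contains_word("italy is nice", "it")      -> False  (whole-word matches only)
#   check_followup("what colour is it")       -> True   (previous entities are reused)
#   check_followup("what is in the fridge")   -> False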
class NaturalLanguageUnderstanding(object):
"""
    The NaturalLanguageUnderstanding service's purpose is to determine the intent and
    entities of the query.
"""
def __init__(self):
self.queue_manager = QueueManager([self.__class__.__name__, "Manager"])
self.previous_query = None
model_path = str(config.directory.data.joinpath("rasa", "nlu"))
dirs = [f for f in listdir(model_path) if isdir(join(model_path, f))]
dirs.sort(reverse=True)
model = join(model_path, dirs[0])
self.interpreter = Interpreter.load(model)
logger.info(f"{self.__class__.__name__} ready")
def callback(self, body, **_):
body["ingredientClass"] = "" # ingredient classifications
body["asking"] = body["query"].split()
intents = self.interpreter.parse(body["query"])
try:
if (
intents["intent"]["name"] == "same_intent"
and self.previous_query != None
):
intents["intent"]["name"] = self.previous_query["intent"]["name"]
if (
intents["intent"]["name"] != "recognise"
and intents["intent"]["name"] != "identify"
and check_followup(body["query"]) == True
):
intents["entities"].extend(self.previous_query["entities"])
            # Set the list of ingredients classed as dairy/meat/nuts as the ingredient class
            # in the body. Entity values are stripped of surrounding spaces and lower-cased.
            # NOTE: `intents["entities"]` is a list of entity dicts, so each one is checked;
            # `dairy`, `meat` and `nuts` are assumed to be ingredient lists defined elsewhere.
            for entity in intents["entities"]:
                value = entity["value"].strip().lower()
                if value == "dairy":
                    body["ingredientClass"] = dairy
                elif value == "meat":
                    body["ingredientClass"] = meat
                elif value == "nuts":
                    body["ingredientClass"] = nuts
except IndexError as error:
logger.error(error)
except Exception as exception:
logger.warning(exception)
self.previous_query = intents
body["intents"] = intents
body["path_done"].append(self.__class__.__name__)
# logger.info(body)
self.queue_manager.publish("Manager", body)
def run(self):
self.queue_manager.start_consuming(self.__class__.__name__, self.callback)
def main():
natural_language_understanding = NaturalLanguageUnderstanding()
natural_language_understanding.run()
if __name__ == "__main__":
main()
| 31.362832
| 86
| 0.615406
|
3650a4d3cd6c02ad1d01cdb72f78256610333141
| 982
|
py
|
Python
|
sdat/tools_sam/split_bam.py
|
Chipeyown/SPLiT-seq-Data-Analysis_Toolkit
|
903fdbeeed8d0ef91c7a08af6226731297b21bdc
|
[
"MIT"
] | 4
|
2019-09-11T07:20:22.000Z
|
2020-09-04T09:21:18.000Z
|
sdat/tools_sam/split_bam.py
|
Chipeyown/SPLiT-seq-Data-Analysis_Toolkit
|
903fdbeeed8d0ef91c7a08af6226731297b21bdc
|
[
"MIT"
] | 1
|
2019-09-10T01:37:18.000Z
|
2019-09-10T01:45:42.000Z
|
sdat/tools_sam/split_bam.py
|
Chipeyown/SPLiT-seq-Data-Analysis_Toolkit
|
903fdbeeed8d0ef91c7a08af6226731297b21bdc
|
[
"MIT"
] | 1
|
2020-09-17T02:24:06.000Z
|
2020-09-17T02:24:06.000Z
|
import subprocess
import time
def split_bam(in_dir, out_folder):
p = subprocess.Popen('samtools view %s' % in_dir, shell=True, stdout=subprocess.PIPE)
chr_list = []
line0 = p.stdout.readline().decode()
chr0 = line0.split('\t')[2]
chr_list.append(chr0)
fo = open('%s/%s.sam' % (out_folder, chr0), 'w') # tmp/sam/Chr1.sam
    print('[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + ' Splitting to' + '\033[1;35m %s \033[0m' % chr0 + ' ...')
fo.write(line0)
for line in p.stdout:
line=line.decode()
chr = line.split('\t')[2]
if chr == chr0:
fo.write(line)
else:
fo.close()
chr0 = chr
chr_list.append(chr0)
fo = open('%s/%s.sam' % (out_folder, chr0), 'w')
            print('[%s]' % time.strftime("%Y-%m-%d %X", time.localtime()) + ' Splitting to' + '\033[1;35m %s \033[0m' % chr0 + ' ...')
fo.write(line)
fo.close()
return chr_list
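# Hedged usage sketch (paths are placeholders): `samtools` must be on PATH and the
# BAM file should be sorted by chromosome, because a new SAM file is started every
# time the chromosome column changes.
#
#     chromosomes = split_bam('aligned_sorted.bam', 'tmp/sam')
#     # -> writes tmp/sam/<chrom>.sam for each chromosome and returns their names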
| 35.071429
| 133
| 0.528513
|
08033c98ed901d172c8037688f66d333fa92c377
| 1,360
|
py
|
Python
|
api.py
|
unmix-io/unmix-api
|
0d029c0a6b93f46c17e3c13588d649529036d1a2
|
[
"MIT"
] | null | null | null |
api.py
|
unmix-io/unmix-api
|
0d029c0a6b93f46c17e3c13588d649529036d1a2
|
[
"MIT"
] | null | null | null |
api.py
|
unmix-io/unmix-api
|
0d029c0a6b93f46c17e3c13588d649529036d1a2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf8
"""
unmix.io RESTful API to extract vocals and instrumental from audio streams.
"""
__author__ = 'David Flury, Andreas Kaufmann, Raphael Müller'
__email__ = "info@unmix.io"
from flask import Flask
from flask_restful import Api, Resource, reqparse
from flask_cors import CORS
import os
from controllers.dummy import DummyController
from controllers.file import FileController
from controllers.youtube import YouTubeController
from controllers.result import ResultController
from context import Context
def register_controllers(api):
api.add_resource(DummyController, '/dummy/<string:name>')
api.add_resource(YouTubeController, '/predict/youtube')
api.add_resource(FileController, '/predict/file')
api.add_resource(ResultController, '/result/<string:identifier>/<string:type>')
def app():
app = Flask(__name__)
api = Api(app)
register_controllers(api)
CORS(app, resources={"*": {"origins": "*"}})
Context.initialize()
if 'UNMIX_API_TLS_CERTIFICATE' in os.environ and os.environ['UNMIX_API_TLS_CERTIFICATE']:
app.run('0.0.0.0', port=os.environ['UNMIX_API_PORT'], ssl_context=(os.environ['UNMIX_API_TLS_CERTIFICATE'], os.environ['UNMIX_API_TLS_PRIVATEKEY']))
else:
app.run('0.0.0.0', port=os.environ['UNMIX_API_PORT'])
if __name__ == "__main__":
app()
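# Hedged usage note (port value is a placeholder): with the required environment
# variables exported, e.g.
#
#     UNMIX_API_PORT=8080 python api.py
#
# the service listens on 0.0.0.0:8080 and exposes the routes registered above
# (/dummy/<name>, /predict/youtube, /predict/file, /result/<identifier>/<type>).
# Setting UNMIX_API_TLS_CERTIFICATE and UNMIX_API_TLS_PRIVATEKEY switches it to HTTPS.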
| 30.909091
| 156
| 0.741912
|
c3e1b5d9914fa604affa4b5a0ce7f15002c09cea
| 9,971
|
py
|
Python
|
meta/enc.py
|
Cypher1/naive
|
768443236032149a368f2e0f6eedc0392820367d
|
[
"MIT"
] | 12
|
2017-02-04T17:32:58.000Z
|
2022-02-06T17:26:58.000Z
|
meta/enc.py
|
Cypher1/naive
|
768443236032149a368f2e0f6eedc0392820367d
|
[
"MIT"
] | 4
|
2018-05-13T20:00:04.000Z
|
2021-08-23T10:01:56.000Z
|
meta/enc.py
|
Cypher1/naive
|
768443236032149a368f2e0f6eedc0392820367d
|
[
"MIT"
] | 5
|
2017-10-11T03:33:51.000Z
|
2021-08-18T03:24:20.000Z
|
#!/usr/bin/env python3
# Generates code for encoding assembly instructions (and potentially decoding
# in the future for a disassembler). The format that is parsed matches the
# format used in the Intel architecture manual, read it for further details.
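#
# A hedged example of the definition format this script expects (the mnemonics and
# encodings below follow the Intel manual, but the exact lines in the real
# definition file may differ):
#
#     # comment lines and blank lines are ignored
#     MOV r/m64, r64 = REX.W + 89 /r
#     ADD r/m64, imm8 = REX.W + 83 /0 ib
#
# i.e. "<mnemonic> <operands> = <encoding>", where the encoding may carry a REX.W
# or OSO prefix marker, the opcode bytes, a /r or /digit field and an immediate
# size marker (ib/iw/id/io).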
import pprint
import re
import sys
from collections import namedtuple
pp = pprint.PrettyPrinter(indent=4)
Instr = namedtuple('Instr', ['opcode', 'encodings'])
Encoding = namedtuple('Encoding',
['args', 'arg_order', 'use_rex_w', 'use_oso', 'opcode_size', 'opcode',
'reg_and_rm', 'opcode_extension', 'immediate_size', 'reg_in_opcode',
'fixup_type'])
def generate_encoder(input_filename, output_filename):
instrs = {}
with open(input_filename, 'r') as f:
for line in f.readlines():
if line == '\n' or line.startswith('#'):
continue
instr, encoding = line.rstrip('\n').split('=')
instr_components = instr.replace(',', ' ').split()
assert 1 <= len(instr_components) <= 4
instr_name = instr_components[0]
args = instr_components[1:]
if len(args) >= 2:
if args[1].startswith('r/m'):
arg_order = 'RM'
elif args[0].startswith('r/m'):
arg_order = 'MR'
else:
arg_order = 'INVALID'
if 'rel' in args:
fixup_type = 'FIXUP_RELATIVE'
else:
fixup_type = 'FIXUP_ABSOLUTE'
# @TODO: We treat 'i' immediates the same as 'c' immediates. It's
# not entirely clear whether they are actually encoded the same, as
# the manual says that 'i' operands follow the opcode, ModR/M and
# SIB, and that 'c' operands follow the opcode. Look into this
# further if there is an opcode with a 'c' immediate and ModR/M or
# SIB bytes.
match = re.match(
r' *(?P<use_rex_w>REX\.W *\+ *)? *' +
r'(?P<use_oso>OSO *\+ *)? *' +
r'(?P<opcode>([0-9a-fA-F]+|\[[0-9a-fA-F ]+\])) *' +
r'(?P<slash>/.)? *' +
r'(?P<reg_in_opcode>\+r[bwdo])? *' +
r'(?P<immediate>[ic][bwdo])? *',
encoding)
use_rex_w = bool(match.group('use_rex_w'))
use_oso = bool(match.group('use_oso'))
opcode_str = match.group('opcode').strip('[]').replace(' ', '')
opcode = [int(a + b, 16) for a, b in zip(opcode_str[0::2], opcode_str[1::2])]
opcode_size = len(opcode)
slash = match.group('slash')
if slash:
slash = slash[1]
if slash and ('0' <= slash <= '7'):
opcode_extension = ord(slash) - ord('0')
else:
opcode_extension = -1
reg_and_rm = slash == 'r'
immediate = match.group('immediate')
if immediate:
immediate_size = {
'b': 1,
'w': 2,
'd': 4,
'o': 8,
}[immediate[1]]
else:
immediate_size = -1
reg_in_opcode = bool(match.group('reg_in_opcode'))
encoding = Encoding(args, arg_order, use_rex_w, use_oso,
opcode_size, opcode, reg_and_rm, opcode_extension,
immediate_size, reg_in_opcode, fixup_type)
# @TODO: We should sort encodings by immediate size (ascending) so
# that the smallest encoding gets selected automatically.
if instr_name not in instrs:
instrs[instr_name] = Instr(instr_name, [])
instrs[instr_name].encodings.append(encoding)
output = []
output.append("""
// @NOTE: This is an automatically generated file! Do not edit it!
// It was generated from '%s', edit that instead
static void assemble_instr(Array(u8) *output, AsmModule *asm_module, AsmInstr *instr)
{
\tswitch (instr->op) {
""" % input_filename)
#pp.pprint(instrs)
for opcode in instrs:
output.append("\tcase %s:\n" % opcode)
for encoding in instrs[opcode].encodings:
if len(encoding.args) == 0:
indent = '\t\t'
else:
output.append("\t\tif ((instr->arity == %d) && %s) {\n" %
(len(encoding.args), arg_conditions(encoding.args)))
indent = '\t\t\t'
output.append(("%sencode_instr(output, asm_module, instr, %s);\n" +
"%sreturn;\n")
% (indent,
', '.join(map(to_c_val,
[encoding.arg_order, encoding.use_rex_w,
encoding.use_oso, encoding.opcode_size,
encoding.opcode, encoding.reg_and_rm,
encoding.opcode_extension, encoding.immediate_size,
encoding.reg_in_opcode, encoding.fixup_type])),
indent))
if len(encoding.args) != 0:
output.append("\t\t}\n")
output.append("\t\tbreak;\n")
output.append("""
\tdefault: break;
\t}
\t
\tfputs("Unimplemented instruction:\\n", stderr);
\tdump_asm_instr(instr);
\t
\tUNIMPLEMENTED;
}
""")
with open(output_filename, 'w') as f:
f.writelines(output)
def check_width(width):
assert int(width) in [8, 16, 32, 64]
REGISTER_MAP = {
"AL": ("REG_CLASS_A", 8), "AX": ("REG_CLASS_A", 16), "EAX": ("REG_CLASS_A", 32), "RAX": ("REG_CLASS_A", 64),
"BL": ("REG_CLASS_B", 8), "BX": ("REG_CLASS_B", 16), "EBX": ("REG_CLASS_B", 32), "RBX": ("REG_CLASS_B", 64),
"CL": ("REG_CLASS_C", 8), "CX": ("REG_CLASS_C", 16), "ECX": ("REG_CLASS_C", 32), "RCX": ("REG_CLASS_C", 64),
"DL": ("REG_CLASS_D", 8), "DX": ("REG_CLASS_D", 16), "EDX": ("REG_CLASS_D", 32), "RDX": ("REG_CLASS_D", 64),
"DIL": ("REG_CLASS_DI", 8), "DI": ("REG_CLASS_DI", 16), "EDI": ("REG_CLASS_DI", 32), "RDI": ("REG_CLASS_DI", 64),
"SIL": ("REG_CLASS_SI", 8), "SI": ("REG_CLASS_SI", 16), "ESI": ("REG_CLASS_SI", 32), "RSI": ("REG_CLASS_SI", 64),
"BPL": ("REG_CLASS_BP", 8), "BP": ("REG_CLASS_BP", 16), "EBP": ("REG_CLASS_BP", 32), "RBP": ("REG_CLASS_BP", 64),
"SPL": ("REG_CLASS_SP", 8), "SP": ("REG_CLASS_SP", 16), "ESP": ("REG_CLASS_SP", 32), "RSP": ("REG_CLASS_SP", 64),
"R8B": ("REG_CLASS_R8", 8), "R8W": ("REG_CLASS_R8", 16), "R8D": ("REG_CLASS_R8", 32), "R8": ("REG_CLASS_R8", 64),
"R9B": ("REG_CLASS_R9", 8), "R9W": ("REG_CLASS_R9", 16), "R9D": ("REG_CLASS_R9", 32), "R9": ("REG_CLASS_R9", 64),
"R10B": ("REG_CLASS_R10", 8), "R10W": ("REG_CLASS_R10", 16), "R10D": ("REG_CLASS_R10", 32), "R10": ("REG_CLASS_R10", 64),
"R11B": ("REG_CLASS_R11", 8), "R11W": ("REG_CLASS_R11", 16), "R11D": ("REG_CLASS_R11", 32), "R11": ("REG_CLASS_R11", 64),
"R12B": ("REG_CLASS_R12", 8), "R12W": ("REG_CLASS_R12", 16), "R12D": ("REG_CLASS_R12", 32), "R12": ("REG_CLASS_R12", 64),
"R13B": ("REG_CLASS_R13", 8), "R13W": ("REG_CLASS_R13", 16), "R13D": ("REG_CLASS_R13", 32), "R13": ("REG_CLASS_R13", 64),
"R14B": ("REG_CLASS_R14", 8), "R14W": ("REG_CLASS_R14", 16), "R14D": ("REG_CLASS_R14", 32), "R14": ("REG_CLASS_R14", 64),
"R15B": ("REG_CLASS_R15", 8), "R15W": ("REG_CLASS_R15", 16), "R15D": ("REG_CLASS_R15", 32), "R15": ("REG_CLASS_R15", 64),
}
def arg_conditions(args):
conditions = []
ext_width = 0
for i, arg in enumerate(args):
arg_str = 'instr->args[%d]' % i
if arg.startswith('r/m'):
width = arg[3:]
check_width(width)
ext_width = width
conditions.append((
'(({0}.t == ASM_VALUE_REGISTER'
+ ' && {0}.u.reg.width == {1})'
+ ' || ({0}.is_deref'
+ ' && {0}.u.reg.width == 64))').format(arg_str, width))
elif arg[0] == 'r' and all(c.isdigit() for c in arg[1:]):
width = arg[1:]
check_width(width)
ext_width = width
conditions.append((
'({0}.t == ASM_VALUE_REGISTER'
+ ' && {0}.u.reg.width == {1}'
+ ' && !{0}.is_deref)').format(arg_str, width))
elif arg.startswith('imm'):
width = arg[3:]
check_width(width)
conditions.append((
'(is_const_and_fits({0}, {1}, {2}, '
+ 'is_sign_extending_instr(instr)))')
.format(arg_str, ext_width, width))
elif arg == 'rel':
conditions.append('({0}.t == ASM_VALUE_CONST)'.format(arg_str))
elif arg in REGISTER_MAP:
reg_class, width = REGISTER_MAP[arg]
conditions.append((
'({0}.t == ASM_VALUE_REGISTER'
+ ' && {0}.u.reg.u.class == {1}'
+ ' && {0}.u.reg.width == {2})').format(arg_str, reg_class, width))
else:
print("Unknown arg type: '%s'" % arg)
assert False
return ' && '.join(conditions)
def to_c_val(x):
if isinstance(x, bool):
return 'true' if x else 'false'
# Must be in this order as bool is a subclass of int.
if isinstance(x, int):
return hex(x) if x >= 0 else str(x)
if isinstance(x, str):
return x
if isinstance(x, list):
return '(u8[]){ %s }' % ', '.join(map(to_c_val, x))
assert False
if __name__ == '__main__':
if len(sys.argv) not in (2, 3):
print("Usage: %s <enc definition> [output file]" % sys.argv[0])
sys.exit(1)
# @PORT
output_filename = "/dev/stdout" if len(sys.argv) == 2 else sys.argv[2]
generate_encoder(sys.argv[1], output_filename)
| 40.864754
| 129
| 0.512687
|
94af275a644c43f8140f0ab469bbc543eb28564d
| 1,095
|
py
|
Python
|
openedx/admin.py
|
mitodl/mitxonline
|
adf6084b1f4addd57473153ed6bd08ea09bc4685
|
[
"BSD-3-Clause"
] | 1
|
2021-07-25T21:29:12.000Z
|
2021-07-25T21:29:12.000Z
|
openedx/admin.py
|
drhodes/mitxonline
|
2af72db40723e7d61c0229b6fc80216a3a94cd9e
|
[
"BSD-3-Clause"
] | 420
|
2021-07-13T14:58:52.000Z
|
2022-03-31T20:50:10.000Z
|
openedx/admin.py
|
drhodes/mitxonline
|
2af72db40723e7d61c0229b6fc80216a3a94cd9e
|
[
"BSD-3-Clause"
] | 1
|
2021-07-25T21:28:32.000Z
|
2021-07-25T21:28:32.000Z
|
"""
Admin site bindings for profiles
"""
from django.contrib import admin
from openedx.models import OpenEdxApiAuth, OpenEdxUser
class OpenEdxUserAdmin(admin.ModelAdmin):
"""Admin for OpenEdxUser"""
model = OpenEdxUser
search_fields = ["user__username", "user__email", "user__name", "platform"]
list_display = ["id", "user", "has_been_synced", "platform"]
list_filter = ["has_been_synced", "platform"]
raw_id_fields = ["user"]
def get_queryset(self, request):
"""Overrides base queryset"""
return super().get_queryset(request).select_related("user")
class OpenEdxApiAuthAdmin(admin.ModelAdmin):
"""Admin for OpenEdxApiAuth"""
model = OpenEdxApiAuth
list_display = ["id", "user"]
search_fields = ["user__username", "user__email", "user__name"]
raw_id_fields = ["user"]
def get_queryset(self, request):
"""Overrides base queryset"""
return super().get_queryset(request).select_related("user")
admin.site.register(OpenEdxUser, OpenEdxUserAdmin)
admin.site.register(OpenEdxApiAuth, OpenEdxApiAuthAdmin)
| 28.076923
| 79
| 0.703196
|
69e2212e02234c0bbb671ad0c6742df50727cf92
| 1,176
|
py
|
Python
|
apps/home/models.py
|
alex112401/Black-Dashboard-Django
|
e1746b7ef35afda448e47b96f11ac2e7e49d084a
|
[
"MIT"
] | null | null | null |
apps/home/models.py
|
alex112401/Black-Dashboard-Django
|
e1746b7ef35afda448e47b96f11ac2e7e49d084a
|
[
"MIT"
] | null | null | null |
apps/home/models.py
|
alex112401/Black-Dashboard-Django
|
e1746b7ef35afda448e47b96f11ac2e7e49d084a
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class EventList(models.Model):
username = models.ForeignKey(User, on_delete = models.CASCADE)
eventname = models.CharField(max_length=50)
eventdate = models.DateField(null=True)
predtime = models.FloatField(max_length=50)
emerge = models.CharField(max_length=50)
iscomplete = models.BooleanField(default=False)
costtime = models.FloatField(null=True, blank=True)
class Meta:
        ordering = ['eventdate']
def __str__(self):
return 'user: %s eventname: %s %s %s %s %s %s'%(self.username, self.eventname, self.eventdate, self.predtime, self.emerge, self.iscomplete , self.costtime)
class Dailyfreetime(models.Model):
username = models.ForeignKey(User, on_delete=models.CASCADE)
timedate = models.DateField(null=True)
freetime = models.FloatField(max_length=50)
busytime = models.FloatField(max_length=50, null=True)
def __str__(self):
return '%s %s %s %s'%(self.username, self.timedate, self.freetime, self.busytime)
| 32.666667
| 163
| 0.705782
|
a4bc47eae6a5d4242780cb28a858e103d6538c90
| 344
|
py
|
Python
|
cobiv/modules/core/gestures/gesture.py
|
gokudomatic/cobiv
|
c095eda704fab319fccc04d43d8099f1e8327734
|
[
"MIT"
] | 4
|
2017-12-26T07:19:46.000Z
|
2019-09-20T08:27:58.000Z
|
cobiv/modules/core/gestures/gesture.py
|
gokudomatic/cobiv
|
c095eda704fab319fccc04d43d8099f1e8327734
|
[
"MIT"
] | 4
|
2017-10-01T12:18:43.000Z
|
2019-06-09T10:29:03.000Z
|
cobiv/modules/core/gestures/gesture.py
|
gokudomatic/cobiv
|
c095eda704fab319fccc04d43d8099f1e8327734
|
[
"MIT"
] | 1
|
2019-01-07T19:58:00.000Z
|
2019-01-07T19:58:00.000Z
|
from cobiv.modules.core.component import Component
class Gesture(Component):
def initialize(self, touches):
pass
def validate(self, touches, strokes):
pass
def process(self, touches, strokes):
pass
def finalize(self, touches, strokes):
pass
def required_touch_count(self):
pass
| 18.105263
| 50
| 0.645349
|
c9477ae1161a1b44f13c2a99d73ed9089e8d1087
| 8,965
|
py
|
Python
|
almost_make/cli.py
|
personalizedrefrigerator/AlmostMake
|
9b91021282315ef1cdf47643e6c3058398eb9111
|
[
"BSD-3-Clause"
] | 7
|
2020-09-04T00:05:57.000Z
|
2021-11-29T20:02:23.000Z
|
almost_make/cli.py
|
personalizedrefrigerator/AlmostMake
|
9b91021282315ef1cdf47643e6c3058398eb9111
|
[
"BSD-3-Clause"
] | 3
|
2021-11-29T19:58:01.000Z
|
2022-02-28T20:54:26.000Z
|
almost_make/cli.py
|
personalizedrefrigerator/AlmostMake
|
9b91021282315ef1cdf47643e6c3058398eb9111
|
[
"BSD-3-Clause"
] | 1
|
2022-02-14T17:32:53.000Z
|
2022-02-14T17:32:53.000Z
|
#!/usr/bin/python3
import sys, os
from almost_make.utils.printUtil import *
import almost_make.utils.makeUtil as makeUtility
import almost_make.utils.macroUtil as macroUtility
from almost_make.utils.argsUtil import *
from almost_make import version
ARGUMENT_MAPPINGS = \
{
'h': "help",
'k': 'keep-going',
'p': 'print-expanded',
'n': 'just-print',
'f': 'file',
'C': 'directory',
's': 'silent',
'b': 'built-in-shell',
'w': 'print-directory',
'j': 'jobs'
}
# These are flags, so don't associate values with them...
JUST_FLAGS = \
{
'help', 'keep-going', 'print-expanded', 'just-print', 'silent', 'built-in-shell',
'print-directory', 'undefined-is-error'
}
# Don't save these when we recurse...
NO_SAVE_ARGS = \
{
'C', 'directory',
'f', 'file',
'default',
'h', 'help',
'version'
}
def printHelp():
cprint("Help: \n", FORMAT_COLORS['YELLOW'])
cprint(" Summary: ", FORMAT_COLORS['YELLOW'])
print("Satisfy dependencies of a target in a makefile. This parser is not quite POSIX-compliant, but should be able to parse simple makefiles.")
cprint(" Usage: almake [targets...] [options]\n", FORMAT_COLORS['YELLOW'])
print(" where each target in targets is a valid target and options include:")
cprint(" -h, --help", FORMAT_COLORS['GREEN'])
print("\t\t\t Print this message.")
cprint(" --version", FORMAT_COLORS['GREEN'])
print("\t\t\t Print version and licensing information.")
cprint(" --file", FORMAT_COLORS['GREEN'])
print("\t\t\t File to parse (default is Makefile).")
cprint(" -k", FORMAT_COLORS['GREEN'])
print("\t\t\t\t Keep going if errors are encountered.")
cprint(" -n, --just-print", FORMAT_COLORS['GREEN'])
cprint("\t\t Just print commands to be run, without evaluating (print commands, don't send them to the shell). ")
print("Be aware that $(shell ...) macros are still evaluated. This option only applies to individual commands.")
cprint(" -p", FORMAT_COLORS['GREEN'])
print("\t\t\t\t Rather than finding targets, print the makefile, with top-level targets expanded.")
cprint(" -C dir", FORMAT_COLORS['GREEN'])
print("\t\t\t Switch to directory, dir, before running make. ")
cprint(" -w, --print-directory", FORMAT_COLORS['GREEN'])
print("\t Print the current directory before and after running make. ")
cprint(" -j, --jobs", FORMAT_COLORS['GREEN'])
print("\t\t\t Maximum number of jobs (e.g. almake -j 8). ")
cprint(" -s, --silent", FORMAT_COLORS['GREEN'])
print("\t\t In most cases, don't print output.")
cprint(" --undefined-is-error", FORMAT_COLORS['GREEN'])
print("\t\t Display an error when attempting to use an undefined macro.")
cprint(" --expand-undefined-to value", FORMAT_COLORS['GREEN'])
print("\t Expand undefined macros to value, rather than expanding to nothing.")
cprint(" -b, --built-in-shell", FORMAT_COLORS['GREEN'])
print("\t Use the built-in shell for commands in the makefile. This can also be enabled as follows:")
cprint(" export ", FORMAT_COLORS['PURPLE'])
print("_BUILTIN_SHELL ", end='')
cprint(":= ", FORMAT_COLORS['YELLOW'])
print("1 \t\t", end='')
cprint("# Use the built-in shell instead of the system shell.", FORMAT_COLORS['GREEN'])
print()
cprint(" export", FORMAT_COLORS['PURPLE'])
print(" _CUSTOM_BASE_COMMANDS ", end='')
cprint(":= ", FORMAT_COLORS['YELLOW'])
print("1 \t", end='')
cprint("# Enable built-in overrides for several commands like ls, echo, cat, grep, and pwd.", FORMAT_COLORS['GREEN'])
print()
cprint(" export", FORMAT_COLORS['PURPLE'])
print(" _SYSTEM_SHELL_PIPES ", end='')
cprint(":= ", FORMAT_COLORS['YELLOW'])
print("1 \t", end='')
cprint("# Send commands that seem related to pipes (e.g. ls | less) directly to the system's shell. ", FORMAT_COLORS['GREEN'])
print()
cprint("Note: ", FORMAT_COLORS['PURPLE'])
print("AlmostMake's built-in shell is currently very limited.")
print()
cprint("Note: ", FORMAT_COLORS['PURPLE'])
print("Macro definitions that override those from the environment" +
" can be provided in addition to targets and options. For example,")
cprint(" make target1 target2 target3 CC=gcc CFLAGS=-O3", FORMAT_COLORS['YELLOW'])
print()
print("should make target1, target2, and target3 with the " +
"macros CC and CFLAGS by default set to gcc and -O3, respectively.")
cprint("Note: ", FORMAT_COLORS['PURPLE'])
print("Options can also be given to almake through the environment. " +
"This is done through the MAKEFLAGS variable. For example, " +
"setting MAKEFLAGS to --built-in-shell causes almake to " +
"always use its built-in shell, rather than the system shell.")
# On commandline run...
def main(args=sys.argv):
args = parseArgs(args, ARGUMENT_MAPPINGS, strictlyFlags=JUST_FLAGS)
# Fill args from MAKEFLAGS (see https://www.gnu.org/software/make/manual/make.html#How-the-MAKE-Variable-Works)
args = fillArgsFromEnv(args, "MAKEFLAGS", ARGUMENT_MAPPINGS, JUST_FLAGS) # Previously-defined args take precedence.
saveArgsInEnv(args, "MAKEFLAGS", NO_SAVE_ARGS) # For recursive calls to make.
if 'help' in args:
printHelp()
elif 'version' in args:
version.printVersion()
else:
macroUtil = macroUtility.MacroUtil()
makeUtil = makeUtility.MakeUtil()
fileName = 'Makefile'
targets = []
defaultMacros = macroUtil.getDefaultMacros() # Fills with macros from environment, etc.
overrideMacros = {}
if 'directory' in args:
try:
os.chdir(args['directory'])
except Exception as ex:
print("Error changing directories: %s" % str(ex))
sys.exit(1)
# If we know the path to the python interpreter...
if sys.executable:
defaultMacros["MAKE"] = sys.executable + " " + os.path.abspath(__file__)
#^ Use ourself, rather than another make implementation.
if 'keep-going' in args:
makeUtil.setStopOnError(False)
if 'silent' in args:
makeUtil.setSilent(True)
if 'jobs' in args:
jobs = 1
try:
jobs = int(args['jobs'])
except ValueError as ex:
makeUtil.errorUtil.reportError("Invalid argument to --jobs. This must be an integer.")
makeUtil.setMaxJobs(jobs)
if 'just-print' in args:
makeUtil.setJustPrint(True)
if 'file' in args:
fileName = args['file']
if 'expand-undefined-to' in args:
makeUtil.setDefaultMacroExpansion(runner.stripQuotes(args["expand-undefined-to"]))
if 'undefined-is-error' in args:
makeUtil.setDefaultMacroExpansion(None)
if len(args['default']) > 0:
targets = [ ]
# Split into targets and default macros.
for arg in args['default']:
assignmentIndex = arg.find("=")
if assignmentIndex > 0:
key = arg[:assignmentIndex].strip() # e.g. VAR in VAR=33
val = arg[assignmentIndex+1:].strip() # e.g. 33 in VAR=33
overrideMacros[key] = val
defaultMacros[key] = val
else:
targets.append(arg)
# Were we told to use the built-in shell?
if 'built-in-shell' in args:
overrideMacros["_BUILTIN_SHELL"] = "1"
overrideMacros["_CUSTOM_BASE_COMMANDS"] = "1"
if len(targets) == 0: # Select the default target, if no targets
targets = ['']
if not os.path.exists(fileName):
cprint("The file with name \"%s\" was not found!\n" % fileName, FORMAT_COLORS['RED'])
print("Please check your spelling.")
sys.exit(1)
fileObj = open(fileName, 'r')
fileContents = fileObj.read()
fileObj.close()
if 'print-directory' in args:
cprint("make: ", FORMAT_COLORS['YELLOW'])
print ('Entering directory %s' % runner.quote(os.getcwd()))
if not 'print-expanded' in args:
# Run for each target.
for target in targets:
makeUtil.runMakefile(fileContents, target, defaultMacros, overrideMacros)
else:
contents, macros = macroUtil.expandAndDefineMacros(fileContents, defaultMacros)
contents, macros = makeUtil.handleIncludes(contents, macros)
print(contents)
if 'print-directory' in args:
cprint("make: ", FORMAT_COLORS['YELLOW'])
print ('Leaving directory %s' % runner.quote(os.getcwd()))
if __name__ == "__main__":
main()
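# Hedged example invocation (targets and macro values are placeholders):
#
#     almake all install -j 4 CC=gcc CFLAGS=-O2
#
# builds the `all` and `install` targets from ./Makefile with up to four parallel
# jobs, overriding the CC and CFLAGS macros; see printHelp() above for the full
# list of options.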
| 41.313364
| 148
| 0.609035
|
0c9093a1bbfc9ca1ce36e266432acd577b30ad92
| 19,424
|
py
|
Python
|
cottoncandy/Encryption.py
|
moflo/cottoncandy
|
b38a1a7094474a29224fbc518d391384f770da24
|
[
"BSD-2-Clause"
] | 35
|
2016-08-01T19:34:19.000Z
|
2022-02-07T19:45:27.000Z
|
cottoncandy/Encryption.py
|
moflo/cottoncandy
|
b38a1a7094474a29224fbc518d391384f770da24
|
[
"BSD-2-Clause"
] | 56
|
2016-08-16T23:35:02.000Z
|
2022-03-13T02:39:06.000Z
|
cottoncandy/Encryption.py
|
moflo/cottoncandy
|
b38a1a7094474a29224fbc518d391384f770da24
|
[
"BSD-2-Clause"
] | 15
|
2016-10-03T00:50:11.000Z
|
2020-03-29T04:45:13.000Z
|
from __future__ import print_function
import os
import struct
from Crypto import Random
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from io import BytesIO
CIPHER_BLOCK_CHAIN = AES.MODE_CBC
INIT_VECT_SIZE = 16
DEFAULT_CHUNK_SIZE = 64 * 1024
FILE_LENGTH_FIELD_SIZE = struct.calcsize('Q')
WHENCE_EOF = 2
class Encryption(object):
"""
Abstract base class for a file encrypt/decrypt object
"""
def __init__(self, key=None, keyfile=None):
"""
Parameters
----------
key : str
key to use
keyfile : str
            file from which to read a stored key
"""
if key is not None:
self.key = key
elif keyfile is not None:
self.read_key(keyfile)
else:
self.generate_key()
def read_key(self, file_name):
"""Reads a stored key
Parameters
----------
file_name : str
name of file to read from
Returns
-------
"""
        with open(file_name, 'rb') as file:  # keys are stored as raw bytes
self.key = file.read()
def store_key(self, file_name='key.key'):
"""Stores key to file
Parameters
----------
file_name : str
            path of file to store the key in
Returns
-------
"""
        with open(file_name, 'wb') as keyFile:  # open for writing; keys are raw bytes
keyFile.write(self.key)
def generate_key(self):
"""Generates a key for encrypting/decrypting files for this object
"""
raise NotImplementedError
def encrypt_file(self, file_name, encrypted_file_name=None):
"""Encrypts a file on disk
Parameters
----------
file_name : str
path to file to be encrypted
encrypted_file_name : str
            path for encrypted file; if None, appends 'enc' to the file extension
Returns
-------
"""
raise NotImplementedError
def encrypt_stream(self, instream):
"""Encrypts a stream object in memory
Parameters
----------
instream : stream
object in memory with a .read() function
Returns
-------
: stream
encrypted object
"""
raise NotImplementedError
def decrypt_file(self, file_name, key=None, decrypted_file_name=None):
"""Decrypts a file on disk
Parameters
----------
file_name : str
path to encrypted file
key : str
key to use for decryption
decrypted_file_name : str
name for decrypted file, if None, strips the last 3 chars off the name (assumes 'enc' on extension)
Returns
-------
"""
raise NotImplementedError
def decrypt_stream(self, instream, key=None):
"""Decrypts a stream in memory
Parameters
----------
instream : stream
object with .read() function
key : str
key to use for decryption
Returns
-------
: stream
decrypted stream
"""
raise NotImplementedError
class AESEncryption(Encryption):
"""
Encrypts files using an AES cipher. All files passing through the same object will be encrypted with the same key
Encrypted files have the following structure:
[ 16 bytes default ][ file size || file size % 16 - 8 bytes ][ 8 bytes ]
[initialization vector][ binary ciphertext for file || padding with spaces ][file size in bytes]
"""
def __init__(self, key=None, keyfile=None, mode=CIPHER_BLOCK_CHAIN, chunk_size=DEFAULT_CHUNK_SIZE, initialisation_vector_size=INIT_VECT_SIZE):
"""
Parameters
----------
key : str
key to use
keyfile : str
path to stored key, overrides key value
mode : int
encrytion mode
chunk_size : int
size in bytes of each chunk during encryption
initialisation_vector_size : int
bytes of initialisation vector
"""
super(AESEncryption, self).__init__(key, keyfile)
self.mode = mode
self.chunk_size = chunk_size
self.initialisation_vector_size = initialisation_vector_size
def generate_key(self, key_size=32):
"""Generates a new AES key
Parameters
----------
key_size : int
bits in key
Returns
-------
"""
if key_size not in [16, 24, 32]:
raise RuntimeError('Bad key length')
self.key = Random.get_random_bytes(key_size)
def encrypt_file(self, file_name, encrypted_file_name=None, key=None, chunk_size=None, initialisation_vector_size=None):
"""
Parameters
----------
file_name : str
path to file
encrypted_file_name : str
name for encrypted file
key : str
key to use
chunk_size : int
bytes in each chunk
initialisation_vector_size : int
bytes in initialisation vector
Returns
-------
"""
if chunk_size is not None and chunk_size % 16 != 0:
raise RuntimeError('Bad chunk_size')
if encrypted_file_name is None:
encrypted_file_name = file_name + 'enc'
if chunk_size is None:
chunk_size = self.chunk_size
if initialisation_vector_size is None:
initialisation_vector_size = self.initialisation_vector_size
# Initialize AES encryption object
initialisation_vector = Random.new().read(initialisation_vector_size)
encryptor = AES.new(self.key if key is None else key, self.mode, initialisation_vector)
# record file size since the encrypted file might be padded
input_size = os.path.getsize(file_name)
file_length_field = struct.pack('<Q', input_size)
with open(file_name, 'rb') as input_file:
with open(encrypted_file_name, 'wb') as output_file:
assert len(initialisation_vector) == initialisation_vector_size
output_file.write(initialisation_vector) # record the initialization vector
# encrypt the file in chunks
this_chunk = None
is_final_chunk = False
while not is_final_chunk:
this_chunk = input_file.read(chunk_size)
if len(this_chunk) == 0 or len(this_chunk) % 16 != 0: # end of file
# pad the end chunk if necessary - AES needs things to be in 16-byte blocks
padSize = 16 - (len(this_chunk) + FILE_LENGTH_FIELD_SIZE) % 16
                        padding = b' ' * padSize
                        this_chunk += padding
                        this_chunk += file_length_field # record the actual file length so we can get rid of the padding on decryption
is_final_chunk = True
output_file.write(encryptor.encrypt(this_chunk))
del encryptor
def encrypt_stream(self, instream, key=None, chunk_size=None, initialisation_vector_size=None):
"""
Parameters
----------
instream : stream
object in memory with a .read() function
key : str
key to use
chunk_size : int
bytes in each chunk
initialisation_vector_size : int
bytes in initialisation vector
Returns
-------
output_stream : stream
encrypted stream
"""
if chunk_size is None:
chunk_size = self.chunk_size
if chunk_size is not None and chunk_size % 16 != 0:
raise RuntimeError('Bad chunk_size')
if initialisation_vector_size is None:
initialisation_vector_size = self.initialisation_vector_size
initialisation_vector = Random.new().read(initialisation_vector_size)
encryptor = AES.new(self.key if key is None else key, self.mode, initialisation_vector)
instream.seek(0, os.SEEK_END)
input_size = instream.tell()
instream.seek(0)
file_length_field = struct.pack('<Q', input_size)
output_stream = BytesIO()
output_stream.write(initialisation_vector)
this_chunk = None
is_final_chunk = False
while not is_final_chunk:
this_chunk = instream.read(chunk_size)
if len(this_chunk) == 0 or len(this_chunk) % 16 != 0: # end of file
# pad the end chunk if necessary
padSize = 16 - (len(this_chunk) + FILE_LENGTH_FIELD_SIZE) % 16
                padding = b' ' * padSize
this_chunk += padding
this_chunk += file_length_field
is_final_chunk = True
output_stream.write(encryptor.encrypt(this_chunk))
del encryptor
output_stream.seek(0)
return output_stream
def decrypt_file(self, file_name, key=None, decrypted_file_name=None, chunk_size=None, initialisation_vector_size=None):
"""
Parameters
----------
file_name : str
path to encrypted file
key : str
key to use
decrypted_file_name : str
name for decrypted file
chunk_size : int
bytes in each chunk
initialisation_vector_size : int
bytes in initialisation vector
Returns
-------
"""
if chunk_size is not None and chunk_size % 16 != 0:
raise RuntimeError('Bad chunk_size')
if decrypted_file_name is None:
decrypted_file_name = file_name[:-3] # strip off the 'enc' in the file extension
if chunk_size is None:
chunk_size = self.chunk_size
if initialisation_vector_size is None:
initialisation_vector_size = self.initialisation_vector_size
with open(file_name, 'rb') as input_file:
initialisation_vector = input_file.read(initialisation_vector_size)
decryptor = AES.new(self.key if key is None else key, self.mode, initialisation_vector)
with open(decrypted_file_name, 'wb+') as output_file:
this_chunk = None
is_final_chunk = False
while not is_final_chunk:
this_chunk = input_file.read(chunk_size)
if len(this_chunk) == 0: # EOF
# read out actual size of file
output_file.seek(-FILE_LENGTH_FIELD_SIZE, WHENCE_EOF)
file_length_field = output_file.read(FILE_LENGTH_FIELD_SIZE)
original_size = struct.unpack('<Q', file_length_field)[0]
is_final_chunk = True
output_file.write(decryptor.decrypt(this_chunk))
# truncate to original size
output_file.truncate(original_size)
del decryptor
def decrypt_stream(self, instream, key=None, chunk_size=None, initialisation_vector_size=None):
"""
Parameters
----------
instream : stream
object in memory with a .read() function
key : str
key to use
chunk_size : int
bytes in each chunk
initialisation_vector_size : int
bytes in initialisation vector
Returns
-------
output_stream : stream
decrypted stream
"""
if chunk_size is not None and chunk_size % 16 != 0:
raise RuntimeError('Bad chunk_size')
if chunk_size is None:
chunk_size = self.chunk_size
if initialisation_vector_size is None:
initialisation_vector_size = self.initialisation_vector_size
instream.seek(0)
initialisation_vector = instream.read(initialisation_vector_size)
decryptor = AES.new(self.key if key is None else key, self.mode, initialisation_vector)
output_stream = BytesIO()
this_chunk = None
is_final_chunk = False
while not is_final_chunk:
this_chunk = instream.read(chunk_size)
if len(this_chunk) == 0:
output_stream.seek(-FILE_LENGTH_FIELD_SIZE, WHENCE_EOF)
file_length_field = output_stream.read(FILE_LENGTH_FIELD_SIZE)
original_size = struct.unpack('<Q', file_length_field)[0]
is_final_chunk = True
output_stream.write(decryptor.decrypt(this_chunk))
del decryptor
output_stream.truncate(original_size)
output_stream.seek(0)
return output_stream
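# Added illustration (not part of the original module): a minimal in-memory round trip
# with AESEncryption, assuming the imports used elsewhere in this file (BytesIO, AES,
# Random) and the module-level constants are available. The helper is only defined,
# never called, so importing the module is unaffected; the payload string is arbitrary.
def _example_aes_stream_roundtrip():
    cipher = AESEncryption()
    cipher.generate_key()                           # fresh 32-byte AES key
    plaintext = BytesIO(b'hello AES round trip')
    encrypted = cipher.encrypt_stream(plaintext)    # IV + padded ciphertext + length field
    decrypted = cipher.decrypt_stream(encrypted)    # padding stripped via the length field
    assert decrypted.read() == b'hello AES round trip'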
class RSAAESEncryption(AESEncryption):
"""
Encrypts each file using a unique AES key. Keys are then encrypted with RSA and returned along with the objects,
the RSA private key is needed to decrypt the AES key.
"""
def __init__(self, key=None, keyfile=None, mode=CIPHER_BLOCK_CHAIN, chunk_size=DEFAULT_CHUNK_SIZE, initialisation_vector_size=INIT_VECT_SIZE, AES_key_length=32):
"""
Parameters
----------
key : str
key to use
keyfile : str
path to key file
mode : int
AES encryption method
chunk_size : int
bytes in each chunk
initialisation_vector_size : int
bytes in initialisation vector
AES_key_length : int
bits in AES keys
"""
super(RSAAESEncryption, self).__init__(key, keyfile, mode, chunk_size, initialisation_vector_size)
if AES_key_length not in [16, 24, 32]:
raise ValueError('Bad AES key size')
self.AES_key_length = AES_key_length
@property
def can_decrypt(self):
"""Can this object decrypt? i.e. does it have a private RSA key?
"""
if not self.RSAcipher:
return False
return self.RSAcipher.can_decrypt()
@property
def can_encrypt(self):
"""Can this object encrypt?, i.e. does it have a public key?
"""
if not self.RSAcipher:
return False
return self.RSAcipher.can_encrypt()
def generate_key(self, key_size=2048):
"""
Parameters
----------
key_size : int
bits in RSA key
Returns
-------
"""
if key_size % 256 != 0:
raise ValueError('RSA key size must be divisible by 256')
self.key = RSA.generate(key_size) # RSA key
self.RSAcipher = PKCS1_OAEP.new(self.key) # salts/pads things to be encrypted
def read_key(self, file_name):
with open(file_name) as keyFile:
self.key = RSA.importKey(keyFile.read())
self.RSAcipher = PKCS1_OAEP.new(self.key)
def store_key(self, file_name='key.key', public=True):
"""
Parameters
----------
file_name
public : bool
store public key only?
Returns
-------
"""
with open(file_name, 'w') as keyfile:
if not public:
keyfile.write(self.key.exportKey())
else:
keyfile.write(self.key.publickey().exportKey())
def generate_AES_key(self, key_size=None):
"""Generates a new AES key
Parameters
----------
key_size : int
            bytes in key (16, 24, or 32)
Returns
-------
key : str
AES key
"""
if key_size is None:
key_size = self.AES_key_length
if key_size not in [16, 24, 32]:
raise ValueError('Bad AES key size')
return Random.get_random_bytes(key_size)
def encrypt_file(self, file_name, encrypted_file_name=None, AESkey=None, chunk_size=None, initialisation_vector_size=None):
"""
Parameters
----------
file_name
encrypted_file_name
AESkey : str
AES key to use for this particular file
chunk_size : int
bytes in each chunk
initialisation_vector_size : int
bytes in initialisation vector
Returns
-------
"""
if AESkey is None:
AESkey = self.generate_AES_key()
super(RSAAESEncryption, self).encrypt_file(file_name, encrypted_file_name, AESkey, chunk_size, initialisation_vector_size)
return self.RSAcipher.encrypt(AESkey)
def encrypt_stream(self, instream, AESkey=None, chunk_size=None, initialisation_vector_size=None):
"""
Parameters
----------
instream : stream
object in memory with a .read() function
AESkey : str
AES key to use for this particular stream
chunk_size
initialisation_vector_size
Returns
-------
"""
if AESkey is None:
AESkey = self.generate_AES_key()
outstream = super(RSAAESEncryption, self).encrypt_stream(instream, AESkey, chunk_size, initialisation_vector_size)
return outstream, self.RSAcipher.encrypt(AESkey)
def encrypt_string(self, plaintext):
"""Encrypts a string
Parameters
----------
plaintext : str
plaintext
Returns
-------
ciphertext : str
ciphertext
"""
return self.RSAcipher.encrypt(plaintext)
def decrypt_string(self, ciphertext):
"""Decrypts a string
Parameters
----------
ciphertext : str
ciphertext
Returns
-------
plaintext : str
plaintext
"""
return self.RSAcipher.decrypt(ciphertext)
def decrypt_file(self, file_name, encrypted_AES_key=None, decrypted_file_name=None, chunk_size=None, initialisation_vector_size=None):
"""
Parameters
----------
file_name : str
path to encrypted file
encrypted_AES_key : str
The encrypted AES key associated with this file
decrypted_file_name : str
name for decrypted file
chunk_size : int
bytes in each chunk
initialisation_vector_size : int
bytes in initialisation vector
Returns
-------
"""
if encrypted_AES_key is None:
raise RuntimeError('You need a key!')
AESkey = self.RSAcipher.decrypt(encrypted_AES_key)
super(RSAAESEncryption, self).decrypt_file(file_name, AESkey, decrypted_file_name, chunk_size, initialisation_vector_size)
def decrypt_stream(self, instream, encrypted_AES_Key=None, chunk_size=None, initialisation_vector_size=None):
"""
Parameters
----------
instream : stream
object in memory with a .read() function
encrypted_AES_Key : str
The encrypted AES key associated with this file
chunk_size : int
bytes in each chunk
initialisation_vector_size : int
bytes in initialisation vector
Returns
-------
"""
if encrypted_AES_Key is None:
raise RuntimeError('You need a key!')
AESkey = self.RSAcipher.decrypt(encrypted_AES_Key)
return super(RSAAESEncryption, self).decrypt_stream(instream, AESkey, chunk_size, initialisation_vector_size)
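# Added illustration (not part of the original module): a hybrid RSA + AES round trip with
# RSAAESEncryption. Each encrypt_stream() call draws a fresh AES key and returns it
# RSA-encrypted next to the data, so only the holder of the RSA private key can recover it.
# Defined only, never called; key size and payload are placeholder choices.
def _example_rsa_aes_roundtrip():
    cipher = RSAAESEncryption()
    cipher.generate_key(key_size=2048)              # RSA key pair + PKCS1_OAEP cipher
    data = BytesIO(b'per-stream AES key, RSA-wrapped')
    encrypted_stream, wrapped_key = cipher.encrypt_stream(data)
    decrypted = cipher.decrypt_stream(encrypted_stream, encrypted_AES_Key=wrapped_key)
    assert decrypted.read() == b'per-stream AES key, RSA-wrapped'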
| 30.540881 | 165 | 0.58196 |
67e5917141e14d476e102357580ce92b2c81cd92 | 18,553 | py | Python | Resnet12/data/mini_imagenet.py | HanChangHun/dsn_fewshot | dbe8d637bce1cb17bfb7c7fd7784bcdebb79085c | ["MIT"] | 80 | 2020-06-09T06:27:30.000Z | 2022-03-09T22:23:00.000Z | Resnet12/data/mini_imagenet.py | HanChangHun/dsn_fewshot | dbe8d637bce1cb17bfb7c7fd7784bcdebb79085c | ["MIT"] | 14 | 2020-09-20T18:54:29.000Z | 2022-02-08T04:43:39.000Z | Resnet12/data/mini_imagenet.py | HanChangHun/dsn_fewshot | dbe8d637bce1cb17bfb7c7fd7784bcdebb79085c | ["MIT"] | 15 | 2020-07-09T13:09:08.000Z | 2022-03-12T13:34:16.000Z |
# Dataloader of Gidaris & Komodakis, CVPR 2018
# Adapted from:
# https://github.com/gidariss/FewShotWithoutForgetting/blob/master/dataloader.py
from __future__ import print_function
import os
import os.path
import numpy as np
import random
import pickle
import json
import math
import torch
import torch.utils.data as data
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torchnet as tnt
import h5py
from PIL import Image
from PIL import ImageEnhance
from pdb import set_trace as breakpoint
# Set the appropriate paths of the datasets here.
_MINI_IMAGENET_DATASET_DIR = './miniimagenet/' ## your miniimagenet folder
def buildLabelIndex(labels):
label2inds = {}
for idx, label in enumerate(labels):
if label not in label2inds:
label2inds[label] = []
label2inds[label].append(idx)
return label2inds
def load_data(file):
try:
with open(file, 'rb') as fo:
data = pickle.load(fo)
return data
except:
with open(file, 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
data = u.load()
return data
class MiniImageNet(data.Dataset):
def __init__(self, phase='train', do_not_use_random_transf=False):
self.base_folder = 'miniImagenet'
#assert(phase=='train' or phase=='val' or phase=='test' or ph)
self.phase = phase
self.name = 'MiniImageNet_' + phase
print('Loading mini ImageNet dataset - phase {0}'.format(phase))
file_train_categories_train_phase = os.path.join(
_MINI_IMAGENET_DATASET_DIR,
'miniImageNet_category_split_train_phase_train.pickle')
file_train_categories_val_phase = os.path.join(
_MINI_IMAGENET_DATASET_DIR,
'miniImageNet_category_split_train_phase_val.pickle')
file_train_categories_test_phase = os.path.join(
_MINI_IMAGENET_DATASET_DIR,
'miniImageNet_category_split_train_phase_test.pickle')
file_val_categories_val_phase = os.path.join(
_MINI_IMAGENET_DATASET_DIR,
'miniImageNet_category_split_val.pickle')
file_test_categories_test_phase = os.path.join(
_MINI_IMAGENET_DATASET_DIR,
'miniImageNet_category_split_test.pickle')
if self.phase=='train':
# During training phase we only load the training phase images
# of the training categories (aka base categories).
data_train = load_data(file_train_categories_train_phase)
self.data = data_train['data']
self.labels = data_train['labels']
self.label2ind = buildLabelIndex(self.labels)
self.labelIds = sorted(self.label2ind.keys())
self.num_cats = len(self.labelIds)
self.labelIds_base = self.labelIds
self.num_cats_base = len(self.labelIds_base)
elif self.phase == 'trainval':
# During training phase we only load the training phase images
# of the training categories (aka base categories).
data_train = load_data(file_train_categories_train_phase)
self.data = data_train['data']
self.labels = data_train['labels']
data_base = load_data(file_train_categories_val_phase)
data_novel = load_data(file_val_categories_val_phase)
self.data = np.concatenate(
[self.data, data_novel['data']], axis=0)
self.data = np.concatenate(
[self.data, data_base['data']], axis=0)
self.labels = np.concatenate(
[self.labels, data_novel['labels']], axis=0)
self.labels = np.concatenate(
[self.labels, data_base['labels']], axis=0)
self.label2ind = buildLabelIndex(self.labels)
self.labelIds = sorted(self.label2ind.keys())
self.num_cats = len(self.labelIds)
self.labelIds_base = self.labelIds
self.num_cats_base = len(self.labelIds_base)
elif self.phase=='val' or self.phase=='test':
if self.phase=='test':
# load data that will be used for evaluating the recognition
# accuracy of the base categories.
data_base = load_data(file_train_categories_test_phase)
                # load data that will be used for evaluating the few-shot recognition
# accuracy on the novel categories.
data_novel = load_data(file_test_categories_test_phase)
else: # phase=='val'
# load data that will be used for evaluating the recognition
# accuracy of the base categories.
data_base = load_data(file_train_categories_val_phase)
                # load data that will be used for evaluating the few-shot recognition
# accuracy on the novel categories.
data_novel = load_data(file_val_categories_val_phase)
self.data = np.concatenate(
[data_base['data'], data_novel['data']], axis=0)
self.labels = data_base['labels'] + data_novel['labels']
self.label2ind = buildLabelIndex(self.labels)
self.labelIds = sorted(self.label2ind.keys())
self.num_cats = len(self.labelIds)
self.labelIds_base = buildLabelIndex(data_base['labels']).keys()
self.labelIds_novel = buildLabelIndex(data_novel['labels']).keys()
self.num_cats_base = len(self.labelIds_base)
self.num_cats_novel = len(self.labelIds_novel)
intersection = set(self.labelIds_base) & set(self.labelIds_novel)
assert(len(intersection) == 0)
else:
raise ValueError('Not valid phase {0}'.format(self.phase))
mean_pix = [x/255.0 for x in [120.39586422, 115.59361427, 104.54012653]]
std_pix = [x/255.0 for x in [70.68188272, 68.27635443, 72.54505529]]
normalize = transforms.Normalize(mean=mean_pix, std=std_pix)
if (self.phase=='test' or self.phase=='val') or (do_not_use_random_transf==True):
self.transform = transforms.Compose([
lambda x: np.asarray(x),
transforms.ToTensor(),
normalize
])
else:
self.transform = transforms.Compose([
transforms.RandomCrop(84, padding=8),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x),
transforms.ToTensor(),
normalize
])
def __getitem__(self, index):
img, label = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
return img, label
def __len__(self):
return len(self.data)
class FewShotDataloader():
def __init__(self,
dataset,
nKnovel=5, # number of novel categories.
nKbase=-1, # number of base categories.
nExemplars=1, # number of training examples per novel category.
nTestNovel=15*5, # number of test examples for all the novel categories.
nTestBase=15*5, # number of test examples for all the base categories.
batch_size=1, # number of training episodes per batch.
num_workers=4,
epoch_size=2000, # number of batches per epoch.
):
self.dataset = dataset
self.phase = self.dataset.phase
max_possible_nKnovel = (self.dataset.num_cats_base if self.phase=='train' or self.phase=='trainval'
else self.dataset.num_cats_novel)
assert(nKnovel >= 0 and nKnovel < max_possible_nKnovel)
self.nKnovel = nKnovel
max_possible_nKbase = self.dataset.num_cats_base
nKbase = nKbase if nKbase >= 0 else max_possible_nKbase
        if (self.phase=='train' or self.phase=='trainval') and nKbase > 0:
nKbase -= self.nKnovel
max_possible_nKbase -= self.nKnovel
assert(nKbase >= 0 and nKbase <= max_possible_nKbase)
self.nKbase = nKbase
self.nExemplars = nExemplars
self.nTestNovel = nTestNovel
self.nTestBase = nTestBase
self.batch_size = batch_size
self.epoch_size = epoch_size
self.num_workers = num_workers
self.is_eval_mode = (self.phase=='test') or (self.phase=='val')
def sampleImageIdsFrom(self, cat_id, sample_size=1):
"""
Samples `sample_size` number of unique image ids picked from the
category `cat_id` (i.e., self.dataset.label2ind[cat_id]).
Args:
cat_id: a scalar with the id of the category from which images will
be sampled.
sample_size: number of images that will be sampled.
Returns:
image_ids: a list of length `sample_size` with unique image ids.
"""
assert(cat_id in self.dataset.label2ind)
assert(len(self.dataset.label2ind[cat_id]) >= sample_size)
# Note: random.sample samples elements without replacement.
return random.sample(self.dataset.label2ind[cat_id], sample_size)
def sampleCategories(self, cat_set, sample_size=1):
"""
Samples `sample_size` number of unique categories picked from the
`cat_set` set of categories. `cat_set` can be either 'base' or 'novel'.
Args:
cat_set: string that specifies the set of categories from which
categories will be sampled.
sample_size: number of categories that will be sampled.
Returns:
cat_ids: a list of length `sample_size` with unique category ids.
"""
if cat_set=='base':
labelIds = self.dataset.labelIds_base
elif cat_set=='novel':
labelIds = self.dataset.labelIds_novel
else:
raise ValueError('Not recognized category set {}'.format(cat_set))
assert(len(labelIds) >= sample_size)
# return sample_size unique categories chosen from labelIds set of
# categories (that can be either self.labelIds_base or self.labelIds_novel)
# Note: random.sample samples elements without replacement.
return random.sample(labelIds, sample_size)
def sample_base_and_novel_categories(self, nKbase, nKnovel):
"""
Samples `nKbase` number of base categories and `nKnovel` number of novel
categories.
Args:
nKbase: number of base categories
nKnovel: number of novel categories
Returns:
Kbase: a list of length 'nKbase' with the ids of the sampled base
categories.
            Knovel: a list of length 'nKnovel' with the ids of the sampled novel
categories.
"""
if self.is_eval_mode:
assert(nKnovel <= self.dataset.num_cats_novel)
# sample from the set of base categories 'nKbase' number of base
# categories.
Kbase = sorted(self.sampleCategories('base', nKbase))
# sample from the set of novel categories 'nKnovel' number of novel
# categories.
Knovel = sorted(self.sampleCategories('novel', nKnovel))
else:
# sample from the set of base categories 'nKnovel' + 'nKbase' number
# of categories.
cats_ids = self.sampleCategories('base', nKnovel+nKbase)
assert(len(cats_ids) == (nKnovel+nKbase))
# Randomly pick 'nKnovel' number of fake novel categories and keep
# the rest as base categories.
random.shuffle(cats_ids)
Knovel = sorted(cats_ids[:nKnovel])
Kbase = sorted(cats_ids[nKnovel:])
return Kbase, Knovel
def sample_test_examples_for_base_categories(self, Kbase, nTestBase):
"""
Sample `nTestBase` number of images from the `Kbase` categories.
Args:
Kbase: a list of length `nKbase` with the ids of the categories from
where the images will be sampled.
nTestBase: the total number of images that will be sampled.
Returns:
Tbase: a list of length `nTestBase` with 2-element tuples. The 1st
element of each tuple is the image id that was sampled and the
                2nd element is its category label (which is in the range
[0, len(Kbase)-1]).
"""
Tbase = []
if len(Kbase) > 0:
            # Sample a number of images from each base category such that the
            # total number of sampled images across all categories equals `nTestBase`.
KbaseIndices = np.random.choice(
np.arange(len(Kbase)), size=nTestBase, replace=True)
KbaseIndices, NumImagesPerCategory = np.unique(
KbaseIndices, return_counts=True)
for Kbase_idx, NumImages in zip(KbaseIndices, NumImagesPerCategory):
imd_ids = self.sampleImageIdsFrom(
Kbase[Kbase_idx], sample_size=NumImages)
Tbase += [(img_id, Kbase_idx) for img_id in imd_ids]
assert(len(Tbase) == nTestBase)
return Tbase
def sample_train_and_test_examples_for_novel_categories(
self, Knovel, nTestNovel, nExemplars, nKbase):
"""Samples train and test examples of the novel categories.
Args:
Knovel: a list with the ids of the novel categories.
nTestNovel: the total number of test images that will be sampled
from all the novel categories.
nExemplars: the number of training examples per novel category that
will be sampled.
nKbase: the number of base categories. It is used as offset of the
category index of each sampled image.
Returns:
Tnovel: a list of length `nTestNovel` with 2-element tuples. The
1st element of each tuple is the image id that was sampled and
the 2nd element is its category label (which is in the range
[nKbase, nKbase + len(Knovel) - 1]).
Exemplars: a list of length len(Knovel) * nExemplars of 2-element
tuples. The 1st element of each tuple is the image id that was
sampled and the 2nd element is its category label (which is in
                the range [nKbase, nKbase + len(Knovel) - 1]).
"""
if len(Knovel) == 0:
return [], []
nKnovel = len(Knovel)
Tnovel = []
Exemplars = []
assert((nTestNovel % nKnovel) == 0)
nEvalExamplesPerClass = int(nTestNovel / nKnovel)
for Knovel_idx in range(len(Knovel)):
imd_ids = self.sampleImageIdsFrom(
Knovel[Knovel_idx],
sample_size=(nEvalExamplesPerClass + nExemplars))
imds_tnovel = imd_ids[:nEvalExamplesPerClass]
            imds_exemplars = imd_ids[nEvalExamplesPerClass:]
            Tnovel += [(img_id, nKbase+Knovel_idx) for img_id in imds_tnovel]
            Exemplars += [(img_id, nKbase+Knovel_idx) for img_id in imds_exemplars]
assert(len(Tnovel) == nTestNovel)
assert(len(Exemplars) == len(Knovel) * nExemplars)
random.shuffle(Exemplars)
return Tnovel, Exemplars
def sample_episode(self):
"""Samples a training episode."""
nKnovel = self.nKnovel
nKbase = self.nKbase
nTestNovel = self.nTestNovel
nTestBase = self.nTestBase
nExemplars = self.nExemplars
Kbase, Knovel = self.sample_base_and_novel_categories(nKbase, nKnovel)
Tbase = self.sample_test_examples_for_base_categories(Kbase, nTestBase)
Tnovel, Exemplars = self.sample_train_and_test_examples_for_novel_categories(
Knovel, nTestNovel, nExemplars, nKbase)
# concatenate the base and novel category examples.
Test = Tbase + Tnovel
random.shuffle(Test)
Kall = Kbase + Knovel
return Exemplars, Test, Kall, nKbase
def createExamplesTensorData(self, examples):
"""
Creates the examples image and label tensor data.
Args:
examples: a list of 2-element tuples, each representing a
train or test example. The 1st element of each tuple
is the image id of the example and 2nd element is the
category label of the example, which is in the range
[0, nK - 1], where nK is the total number of categories
(both novel and base).
Returns:
images: a tensor of shape [nExamples, Height, Width, 3] with the
example images, where nExamples is the number of examples
(i.e., nExamples = len(examples)).
labels: a tensor of shape [nExamples] with the category label
of each example.
"""
images = torch.stack(
[self.dataset[img_idx][0] for img_idx, _ in examples], dim=0)
labels = torch.LongTensor([label for _, label in examples])
return images, labels
def get_iterator(self, epoch=0):
rand_seed = epoch
random.seed(rand_seed)
np.random.seed(rand_seed)
def load_function(iter_idx):
Exemplars, Test, Kall, nKbase = self.sample_episode()
Xt, Yt = self.createExamplesTensorData(Test)
Kall = torch.LongTensor(Kall)
if len(Exemplars) > 0:
Xe, Ye = self.createExamplesTensorData(Exemplars)
return Xe, Ye, Xt, Yt, Kall, nKbase
else:
return Xt, Yt, Kall, nKbase
tnt_dataset = tnt.dataset.ListDataset(
elem_list=range(self.epoch_size), load=load_function)
data_loader = tnt_dataset.parallel(
batch_size=self.batch_size,
num_workers=(0 if self.is_eval_mode else self.num_workers),
shuffle=(False if self.is_eval_mode else True))
return data_loader
def __call__(self, epoch=0):
return self.get_iterator(epoch)
def __len__(self):
return int(self.epoch_size / self.batch_size)
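# Added illustration (not part of the original module): building a 5-way 1-shot episodic
# loader on the validation split. The pickle files must already exist under
# _MINI_IMAGENET_DATASET_DIR; episode counts below are placeholder choices.
# Defined only, never called.
def _example_episode_loader():
    dataset = MiniImageNet(phase='val')
    loader = FewShotDataloader(
        dataset,
        nKnovel=5,          # 5-way episodes
        nKbase=0,           # no base categories during evaluation
        nExemplars=1,       # 1-shot support set
        nTestNovel=15 * 5,  # 15 query images per novel category
        nTestBase=0,
        batch_size=1,
        epoch_size=10)
    for Xe, Ye, Xt, Yt, Kall, nKbase in loader(epoch=0):
        # Xe/Ye: support images and labels, Xt/Yt: query images and labels
        print(Xe.shape, Xt.shape)
        break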
| 41.137472 | 107 | 0.617259 |
00df031bfc33fae4fdd71f5c04b2d239dc6528d0 | 709 | py | Python | examples/manipulate_userdata.py | tracivar/zegami-python-sdk | 3d273d2c987ccf2d311733f29529e2c5b25beceb | ["Apache-2.0"] | null | null | null | examples/manipulate_userdata.py | tracivar/zegami-python-sdk | 3d273d2c987ccf2d311733f29529e2c5b25beceb | ["Apache-2.0"] | null | null | null | examples/manipulate_userdata.py | tracivar/zegami-python-sdk | 3d273d2c987ccf2d311733f29529e2c5b25beceb | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""Download images from a collection with a specific tag."""
from zegami_sdk.client import ZegamiClient
WORKSPACE_ID = ''
COLLECTION_ID = ''
USERNAME = ''
PASSWORD = ''
zc = ZegamiClient(username=USERNAME, password=PASSWORD)
workspace = zc.get_workspace_by_id(WORKSPACE_ID)
collection = workspace.get_collection_by_id(COLLECTION_ID)
# fetch userdata
userdata = collection.userdata
# add new userdata
userdata = collection.set_userdata({
'testString': 'string',
'testNumber': 1
})
# replace userdata
userdata = collection.set_userdata({
'testString': 'new string'
})
# remove userdata
userdata = collection.set_userdata({
'testString': None
})
print(userdata)
| 19.162162 | 60 | 0.732017 |
5d9ad098ecb4d7f5289f2ed25d27c5915fcf2f1d | 279 | py | Python | Act1/cssenv/Scripts/csaplication/login/urls.py | Talox1/Cliente-Servidor | fb243832684f818416c10fdcdaf397fccda09321 | ["MIT"] | null | null | null | Act1/cssenv/Scripts/csaplication/login/urls.py | Talox1/Cliente-Servidor | fb243832684f818416c10fdcdaf397fccda09321 | ["MIT"] | null | null | null | Act1/cssenv/Scripts/csaplication/login/urls.py | Talox1/Cliente-Servidor | fb243832684f818416c10fdcdaf397fccda09321 | ["MIT"] | null | null | null |
from django.urls import path,re_path
from django.conf.urls import include
from django.contrib.auth.models import User
# from rest_framework import routers, serializers, viewsets
from login.views import CustomAuthToken
urlpatterns = [
re_path(r'^',CustomAuthToken.as_view()),
]
| 31 | 58 | 0.802867 |
ef407def11be145f06cedacb996051e9e7391bf1 | 3,158 | py | Python | SoftLayer/managers/ssl.py | corneil/softlayer-python | bbaf562fb76536c5cc652e356729723f38f48b66 | ["MIT"] | 1 | 2019-11-06T13:54:07.000Z | 2019-11-06T13:54:07.000Z | SoftLayer/managers/ssl.py | underscorephil/softlayer-python | 567540a328d5258e55594466127cd22b9a04a2ea | ["MIT"] | null | null | null | SoftLayer/managers/ssl.py | underscorephil/softlayer-python | 567540a328d5258e55594466127cd22b9a04a2ea | ["MIT"] | 1 | 2020-07-07T12:18:26.000Z | 2020-07-07T12:18:26.000Z |
"""
SoftLayer.ssl
~~~~~~~~~~~~~
SSL Manager/helpers
:license: MIT, see LICENSE for more details.
"""
class SSLManager(object):
"""Manages SSL certificates.
:param SoftLayer.API.Client client: an API client instance
Example::
# Initialize the Manager.
# env variables. These can also be specified in ~/.softlayer,
# or passed directly to SoftLayer.Client()
# SL_USERNAME = YOUR_USERNAME
# SL_API_KEY = YOUR_API_KEY
import SoftLayer
client = SoftLayer.Client()
mgr = SoftLayer.SSLManager(client)
"""
def __init__(self, client):
self.client = client
self.ssl = self.client['Security_Certificate']
def list_certs(self, method='all'):
"""List all certificates.
:param string method: The type of certificates to list. Options are
'all', 'expired', and 'valid'.
:returns: A list of dictionaries representing the requested SSL certs.
Example::
# Get all valid SSL certs
certs = mgr.list_certs(method='valid')
            print(certs)
"""
ssl = self.client['Account']
methods = {
'all': 'getSecurityCertificates',
'expired': 'getExpiredSecurityCertificates',
'valid': 'getValidSecurityCertificates'
}
mask = "mask[id, commonName, validityDays, notes]"
func = getattr(ssl, methods[method])
return func(mask=mask)
def add_certificate(self, certificate):
"""Creates a new certificate.
:param dict certificate: A dictionary representing the parts of the
certificate.
See developer.softlayer.com for more info.
Example::
cert = ??
result = mgr.add_certificate(certificate=cert)
"""
return self.ssl.createObject(certificate)
def remove_certificate(self, cert_id):
"""Removes a certificate.
:param integer cert_id: a certificate ID to remove
Example::
# Removes certificate with id 1234
result = mgr.remove_certificate(cert_id = 1234)
"""
return self.ssl.deleteObject(id=cert_id)
def edit_certificate(self, certificate):
"""Updates a certificate with the included options.
The provided dict must include an 'id' key and value corresponding to
the certificate ID that should be updated.
:param dict certificate: the certificate to update.
Example::
# Updates the cert id 1234
cert['id'] = 1234
cert['certificate'] = ??
result = mgr.edit_certificate(certificate=cert)
"""
return self.ssl.editObject(certificate, id=certificate['id'])
def get_certificate(self, cert_id):
"""Gets a certificate with the ID specified.
:param integer cert_id: the certificate ID to retrieve
Example::
cert = mgr.get_certificate(cert_id=1234)
print(cert)
"""
return self.ssl.getObject(id=cert_id)
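# Added illustration (not part of the original module): the manager lifecycle using only
# the methods defined above. The certificate dict contents are left to the caller (see
# developer.softlayer.com); this assumes createObject() echoes back the created record
# including its 'id'. Defined only, never called.
def _example_ssl_manager_usage(client, certificate):
    mgr = SSLManager(client)
    created = mgr.add_certificate(certificate)    # upload a new certificate
    valid_certs = mgr.list_certs(method='valid')  # list non-expired certificates
    fetched = mgr.get_certificate(created['id'])  # fetch the new certificate by ID
    mgr.remove_certificate(created['id'])         # delete it again
    return valid_certs, fetched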
| 27.701754 | 78 | 0.591514 |
ca394a2d66766faa097b16504926b432d2ddde2b | 13,371 | py | Python | siphon/tests/test_catalog.py | story645/siphon | d2d4584f953cc4034e2b60bcb0ced8bc7ec4fee5 | ["BSD-3-Clause"] | null | null | null | siphon/tests/test_catalog.py | story645/siphon | d2d4584f953cc4034e2b60bcb0ced8bc7ec4fee5 | ["BSD-3-Clause"] | null | null | null | siphon/tests/test_catalog.py | story645/siphon | d2d4584f953cc4034e2b60bcb0ced8bc7ec4fee5 | ["BSD-3-Clause"] | null | null | null |
# Copyright (c) 2013-2017 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the catalog access API."""
from datetime import datetime
import logging
import pytest
from siphon.catalog import get_latest_access_url, TDSCatalog
from siphon.testing import get_recorder
log = logging.getLogger('siphon.catalog')
log.setLevel(logging.WARNING)
recorder = get_recorder(__file__)
@recorder.use_cassette('thredds-test-toplevel-catalog')
def test_basic():
"""Test of parsing a basic catalog."""
url = 'http://thredds-test.unidata.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert 'Forecast Model Data' in cat.catalog_refs
@recorder.use_cassette('thredds-test-toplevel-catalog')
def test_catalog_representation():
"""Test string representation of the catalog object."""
url = 'http://thredds-test.unidata.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert str(cat) == 'Unidata THREDDS Data Server'
@recorder.use_cassette('thredds-test-latest-gfs-0p5')
def test_access():
"""Test catalog parsing of access methods."""
url = ('http://thredds-test.unidata.ucar.edu/thredds/catalog/grib/'
'NCEP/GFS/Global_0p5deg/latest.xml')
cat = TDSCatalog(url)
ds = list(cat.datasets.values())[0]
assert 'OPENDAP' in ds.access_urls
@recorder.use_cassette('thredds-test-default-5-0')
def test_access_default_catalog():
"""Test case-insensitive parsing of access methods in default catalog."""
url = ('http://localhost:8081/thredds/catalog/catalog.xml')
cat = TDSCatalog(url)
ds = list(cat.datasets.values())[0]
assert 'OPENDAP' in ds.access_urls
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_virtual_access():
"""Test access of virtual datasets."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
# find the 2D time coordinate "full collection" dataset
for dataset in list(cat.datasets.values()):
if 'Full Collection' in dataset.name:
ds = dataset
break
assert 'OPENDAP' in ds.access_urls
# TwoD is a virtual dataset, so HTTPServer
# should not be listed here
assert 'HTTPServer' not in ds.access_urls
@recorder.use_cassette('latest_rap_catalog')
def test_get_latest():
"""Test latest dataset helper function."""
url = ('http://thredds-test.unidata.ucar.edu/thredds/catalog/'
'grib/NCEP/RAP/CONUS_13km/catalog.xml')
latest_url = get_latest_access_url(url, 'OPENDAP')
assert latest_url
@recorder.use_cassette('latest_rap_catalog')
def test_latest_attribute():
"""Test using the catalog latest attribute."""
url = ('http://thredds-test.unidata.ucar.edu/thredds/catalog/'
'grib/NCEP/RAP/CONUS_13km/catalog.xml')
cat = TDSCatalog(url)
assert cat.latest.name == 'RR_CONUS_13km_20150527_0100.grib2'
@recorder.use_cassette('top_level_cat')
def test_tds_top_catalog():
"""Test parsing top-level catalog."""
url = 'http://thredds.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert cat
@recorder.use_cassette('radar_dataset_cat')
def test_simple_radar_cat():
"""Test parsing of radar server catalog."""
url = 'http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/dataset.xml'
cat = TDSCatalog(url)
assert cat
@recorder.use_cassette('point_feature_dataset_xml')
def test_simple_point_feature_collection_xml():
"""Test accessing point feature top-level catalog."""
url = ('http://thredds.ucar.edu/thredds/catalog/nws/metar/ncdecoded/catalog.xml'
'?dataset=nws/metar/ncdecoded/Metar_Station_Data_fc.cdmr')
cat = TDSCatalog(url)
assert cat
@recorder.use_cassette('html_then_xml_catalog')
def test_html_link(recwarn):
"""Test that we fall-back when given an HTML catalog page."""
url = ('http://thredds-test.unidata.ucar.edu/thredds/catalog/'
'grib/NCEP/RAP/CONUS_13km/catalog.html')
TDSCatalog(url)
assert 'Changing' in str(recwarn.pop(UserWarning).message)
@recorder.use_cassette('follow_cat')
def test_catalog_follow():
"""Test catalog reference following."""
url = 'http://thredds.ucar.edu/thredds/catalog.xml'
ref_name = 'Forecast Model Data'
cat = TDSCatalog(url).catalog_refs[ref_name].follow()
assert cat
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_order():
"""Test that we properly order datasets parsed from the catalog."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
assert list(cat.datasets) == ['Full Collection (Reference / Forecast Time) Dataset',
'Best NAM CONUS 20km Time Series',
'Latest Collection for NAM CONUS 20km']
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_get_by_index():
"""Test that datasets can be accessed by index."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
assert cat.datasets[0].name == 'Full Collection (Reference / Forecast Time) Dataset'
assert cat.datasets[1].name == 'Best NAM CONUS 20km Time Series'
assert cat.datasets[2].name == 'Latest Collection for NAM CONUS 20km'
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_str():
"""Test that datasets are printed as expected."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
assert str(cat.datasets) == ("['Full Collection (Reference / Forecast Time) Dataset', "
"'Best NAM CONUS 20km Time Series', "
"'Latest Collection for NAM CONUS 20km']")
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_sliced_str():
"""Test that datasets are printed as expected when sliced."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
assert str(cat.datasets[-2:]) == ('[Best NAM CONUS 20km Time Series, '
'Latest Collection for NAM CONUS 20km]')
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_nearest_time():
"""Test getting dataset by time using filenames."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
nearest = cat.catalog_refs.filter_time_nearest(datetime(2015, 5, 28, 17))
assert nearest.title == 'NAM_CONUS_20km_noaaport_20150528_1800.grib1'
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_nearest_time_30():
"""Test getting dataset by time; check for a day in the 30s (#gh-173)."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
nearest = cat.catalog_refs.filter_time_nearest(datetime(2015, 5, 30, 11))
assert nearest.title == 'NAM_CONUS_20km_noaaport_20150530_1200.grib1'
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_nearest_time_raises():
"""Test getting dataset by time using filenames."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
# Datasets doesn't have any timed datasets
with pytest.raises(ValueError):
cat.datasets.filter_time_nearest(datetime(2015, 5, 28, 17))
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_time_range():
"""Test getting datasets by time range using filenames."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
in_range = cat.catalog_refs.filter_time_range(datetime(2015, 5, 28, 0),
datetime(2015, 5, 29, 0))
titles = [item.title for item in in_range]
assert titles == ['NAM_CONUS_20km_noaaport_20150528_0000.grib1',
'NAM_CONUS_20km_noaaport_20150528_0600.grib1',
'NAM_CONUS_20km_noaaport_20150528_1200.grib1',
'NAM_CONUS_20km_noaaport_20150528_1800.grib1',
'NAM_CONUS_20km_noaaport_20150529_0000.grib1']
@recorder.use_cassette('top_level_20km_rap_catalog')
def test_datasets_time_range_raises():
"""Test getting datasets by time range using filenames."""
url = ('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/NAM/'
'CONUS_20km/noaaport/catalog.xml')
cat = TDSCatalog(url)
# No time-based dataset names
with pytest.raises(ValueError):
cat.datasets.filter_time_range(datetime(2015, 5, 28, 0), datetime(2015, 5, 29, 0))
@recorder.use_cassette('top_level_cat')
def test_catalog_ref_order():
"""Test that catalog references are properly ordered."""
url = 'http://thredds.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert list(cat.catalog_refs) == ['Forecast Model Data', 'Forecast Products and Analyses',
'Observation Data', 'Radar Data', 'Satellite Data',
'Unidata case studies']
@recorder.use_cassette('cat_non_standard_context_path')
def test_non_standard_context_path():
"""Test accessing TDS with non-standard Context Path."""
url = 'http://ereeftds.bom.gov.au/ereefs/tds/catalog/ereef/mwq/P1A/catalog.xml'
cat = TDSCatalog(url)
ds = cat.datasets['A20020101.P1A.ANN_MIM_RMP.nc']
expected = ('http://ereeftds.bom.gov.au/ereefs/tds/dodsC/ereef/mwq/'
'P1A/A20020101.P1A.ANN_MIM_RMP.nc')
assert ds.access_urls['OPENDAP'] == expected
@recorder.use_cassette('cat_access_elements')
def test_access_elements():
"""Test parsing access elements in TDS client catalog."""
url = 'http://oceandata.sci.gsfc.nasa.gov/opendap/SeaWiFS/L3SMI/2001/001/catalog.xml'
cat = TDSCatalog(url)
assert len(list(cat.datasets)) != 0
@recorder.use_cassette('cat_only_http')
def test_simple_service_within_compound():
"""Test parsing of a catalog that asks for a single service within a compound one."""
url = ('http://thredds-test.unidata.ucar.edu/thredds/catalog/noaaport/text/'
'tropical/atlantic/hdob/catalog.xml')
cat = TDSCatalog(url)
assert (cat.datasets[0].access_urls
== {'HTTPServer': 'http://thredds-test.unidata.ucar.edu/thredds/'
'fileServer/noaaport/text/tropical/atlantic/hdob/'
'High_density_obs_20170824.txt'})
@recorder.use_cassette('rsmas_ramadda')
def test_ramadda_catalog():
"""Test parsing a catalog from RAMADDA."""
url = 'http://weather.rsmas.miami.edu/repository?output=thredds.catalog'
cat = TDSCatalog(url)
assert len(cat.catalog_refs) == 12
@recorder.use_cassette('rsmas_ramadda_datasets')
def test_ramadda_access_urls():
"""Test creating access urls from a catalog from RAMADDA."""
url = 'http://weather.rsmas.miami.edu/repository?output=thredds.catalog'
# Walk down a few levels to where we can get a dataset
cat = (TDSCatalog(url).catalog_refs[0].follow().catalog_refs[0].follow()
.catalog_refs[0].follow())
ds = cat.datasets[3]
assert ds.access_urls['opendap'] == ('http://weather.rsmas.miami.edu/repository/opendap/'
'synth:a43c1cc4-1cf2-4365-97b9-6768b8201407:L3YyYl91c'
'2VzRUNPQS9keW5hbW9fYmFzaWNfdjJiXzIwMTFhbGwubmM='
'/entry.das')
@recorder.use_cassette('tds50_catalogref_follow')
def test_tds50_catalogref_follow():
"""Test following a catalog ref url on TDS 5."""
cat = TDSCatalog('http://thredds-test.unidata.ucar.edu/thredds/catalog.xml')
assert len(cat.catalog_refs[0].follow().catalog_refs) == 59
@recorder.use_cassette('top_level_cat')
def test_catalog_ref_str():
"""Test that catalog references are properly represented as strings."""
url = 'http://thredds.ucar.edu/thredds/catalog.xml'
cat = TDSCatalog(url)
assert str(cat.catalog_refs[0]) == 'Forecast Model Data'
@recorder.use_cassette('ncei_embedded_metadata')
def test_catalog_with_embedded_metadata_elements():
"""Test catalog with embedded metadata elements."""
url = 'https://www.ncei.noaa.gov/thredds/catalog/namanl/201802/20180220/catalog.xml'
cat = TDSCatalog(url)
md = cat.metadata
assert 'external_metadata' in md
assert 'serviceName' in md
@recorder.use_cassette('latest_resolver_on_latest_dataset')
def test_latest_resolver_fail():
"""Test getting latest on catalog that does not have a resolver."""
cat = TDSCatalog('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/GFS/'
'Global_0p25deg_ana/latest.xml')
latest = ''
with pytest.raises(AttributeError) as excinfo:
latest = cat.latest
assert latest == ''
assert '"latest" not available for this catalog' in str(excinfo.value)
| 39.559172 | 95 | 0.687234 |
6056888e31e0f6b32971566ba3dd54d4fbde30b2 | 3,202 | bzl | Python | api/bazel/repository_locations.bzl | yuxiaobo96/envoy | 789a95989f62ed1bbb2822c0f236300099f1bdb9 | ["Apache-2.0"] | null | null | null | api/bazel/repository_locations.bzl | yuxiaobo96/envoy | 789a95989f62ed1bbb2822c0f236300099f1bdb9 | ["Apache-2.0"] | null | null | null | api/bazel/repository_locations.bzl | yuxiaobo96/envoy | 789a95989f62ed1bbb2822c0f236300099f1bdb9 | ["Apache-2.0"] | null | null | null |
BAZEL_SKYLIB_RELEASE = "0.8.0"
BAZEL_SKYLIB_SHA256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e"
OPENCENSUS_PROTO_GIT_SHA = "5cec5ea58c3efa81fa808f2bd38ce182da9ee731" # Jul 25, 2019
OPENCENSUS_PROTO_SHA256 = "faeb93f293ff715b0cb530d273901c0e2e99277b9ed1c0a0326bca9ec5774ad2"
PGV_GIT_SHA = "a18376249eb51cdd517f67fe8703897322812e6d" # Nov 5, 2019
PGV_SHA256 = "8e45a3582e7fa9d0005ad6ff1ed9208e793b847f1c455d2bbe5b1c580338ffaf"
GOOGLEAPIS_GIT_SHA = "82944da21578a53b74e547774cf62ed31a05b841" # Dec 2, 2019
GOOGLEAPIS_SHA = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405"
PROMETHEUS_GIT_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017
PROMETHEUS_SHA = "783bdaf8ee0464b35ec0c8704871e1e72afa0005c3f3587f65d9d6694bf3911b"
KAFKA_SOURCE_SHA = "ae7a1696c0a0302b43c5b21e515c37e6ecd365941f68a510a7e442eebddf39a1" # 2.2.0-rc2
UDPA_GIT_SHA = "a45f154471612140bc7f4a4d5abbc8a315848d7f" # Dec 12, 2019
UDPA_SHA256 = "03e794f7bae192930213622105bf9c6891e7de20c22deae12d8e92f54baca8c5"
ZIPKINAPI_RELEASE = "0.2.2" # Aug 23, 2019
ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b"
REPOSITORY_LOCATIONS = dict(
bazel_skylib = dict(
sha256 = BAZEL_SKYLIB_SHA256,
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/" + BAZEL_SKYLIB_RELEASE + "/bazel-skylib." + BAZEL_SKYLIB_RELEASE + ".tar.gz"],
),
com_envoyproxy_protoc_gen_validate = dict(
sha256 = PGV_SHA256,
strip_prefix = "protoc-gen-validate-" + PGV_GIT_SHA,
urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/" + PGV_GIT_SHA + ".tar.gz"],
),
com_google_googleapis = dict(
# TODO(dio): Consider writing a Skylark macro for importing Google API proto.
sha256 = GOOGLEAPIS_SHA,
strip_prefix = "googleapis-" + GOOGLEAPIS_GIT_SHA,
urls = ["https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_GIT_SHA + ".tar.gz"],
),
com_github_cncf_udpa = dict(
sha256 = UDPA_SHA256,
strip_prefix = "udpa-" + UDPA_GIT_SHA,
urls = ["https://github.com/cncf/udpa/archive/" + UDPA_GIT_SHA + ".tar.gz"],
),
prometheus_metrics_model = dict(
sha256 = PROMETHEUS_SHA,
strip_prefix = "client_model-" + PROMETHEUS_GIT_SHA,
urls = ["https://github.com/prometheus/client_model/archive/" + PROMETHEUS_GIT_SHA + ".tar.gz"],
),
opencensus_proto = dict(
sha256 = OPENCENSUS_PROTO_SHA256,
strip_prefix = "opencensus-proto-" + OPENCENSUS_PROTO_GIT_SHA + "/src",
urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/" + OPENCENSUS_PROTO_GIT_SHA + ".tar.gz"],
),
kafka_source = dict(
sha256 = KAFKA_SOURCE_SHA,
strip_prefix = "kafka-2.2.0-rc2/clients/src/main/resources/common/message",
urls = ["https://github.com/apache/kafka/archive/2.2.0-rc2.zip"],
),
com_github_openzipkin_zipkinapi = dict(
sha256 = ZIPKINAPI_SHA256,
strip_prefix = "zipkin-api-" + ZIPKINAPI_RELEASE,
urls = ["https://github.com/openzipkin/zipkin-api/archive/" + ZIPKINAPI_RELEASE + ".tar.gz"],
),
)
| 48.515152 | 158 | 0.732667 |
9dbe6b8ab778cbd94d61248fc74b976bdb72f82b | 1,127 | py | Python | medium/781-rabbits-in-forest.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | ["MIT"] | 2 | 2021-03-14T11:38:26.000Z | 2021-03-14T11:38:30.000Z | medium/781-rabbits-in-forest.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | ["MIT"] | null | null | null | medium/781-rabbits-in-forest.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | ["MIT"] | 1 | 2022-01-17T19:33:23.000Z | 2022-01-17T19:33:23.000Z |
'''
Rabbits in Forest
In a forest, every rabbit has a color. Some of the rabbits (possibly all of them) tell you how many
other rabbits have the same color as themselves. These answers are collected in the array answers.
Return the minimum number of rabbits that could be in the forest.
Example:
Input: answers = [1, 1, 2]
Output: 5
Explanation:
The two rabbits that answered "1" could both have the same color, say red.
The rabbit that answered "2" cannot be red, otherwise the answers would contradict each other.
Say the rabbit that answered "2" is blue.
Then there must be 2 more blue rabbits in the forest whose answers were not included in the array.
So the minimum number of rabbits in the forest is 5: 3 that answered plus 2 that did not.
Input: answers = [10, 10, 10]
Output: 11
Input: answers = []
Output: 0
Notes:
The length of answers will not exceed 1000.
Each answers[i] is an integer in the range [0, 999].
'''
from typing import List
'''
Approach: a single pass (the official solution calls this greedy).
If a rabbit reports the number n, i.e. the number of OTHER rabbits sharing its color, then that color
group contains n+1 rabbits in total, so the answer n can be given by at most n+1 rabbits per group.
Hence, whenever the count of an answer n is not a multiple of n+1, round it up to the next multiple of n+1.
Time complexity: O(n)
Space complexity: O(n)
'''
class Solution:
def numRabbits(self, answers: List[int]) -> int:
nums = {}
        # count how many times each answer appears
for a in answers:
if a in nums:
nums[a] += 1
else:
nums[a] = 1
        count = len(answers)  # total number of rabbits that gave an answer
for num, c in nums.items():
d, r = divmod(c, num + 1)
if r > 0:
                count += num + 1 - r  # if the count of answer num is not a multiple of num+1, add the missing rabbits
return count
s = Solution()
print(s.numRabbits([1, 1, 2]))
print(s.numRabbits([10, 10, 10]))
print(s.numRabbits([]))
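# Added cross-check (illustration only): for each answer n reported c times, the rabbits with
# that answer contribute ceil(c / (n + 1)) * (n + 1) to the minimum total, which is exactly
# what the greedy loop above computes. Defined only, never called automatically.
def _num_rabbits_check(answers: List[int]) -> int:
    from collections import Counter
    from math import ceil
    return sum(ceil(c / (n + 1)) * (n + 1) for n, c in Counter(answers).items())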
| 19.77193 | 70 | 0.597161 |
636fac92cd5a53da0dbc4ae3081c8311393376d1 | 4,086 | py | Python | text_based_sorts/test.py | AshuHK/Sorting_Visualization | 24d4edbda1ef15f950dc2acf27e93e005c6c0522 | ["MIT"] | null | null | null | text_based_sorts/test.py | AshuHK/Sorting_Visualization | 24d4edbda1ef15f950dc2acf27e93e005c6c0522 | ["MIT"] | null | null | null | text_based_sorts/test.py | AshuHK/Sorting_Visualization | 24d4edbda1ef15f950dc2acf27e93e005c6c0522 | ["MIT"] | 1 | 2020-05-04T23:43:23.000Z | 2020-05-04T23:43:23.000Z |
# import the sorts here
from InsertionSort import insertion_sort
from SelectionSort import selection_sort
from BubbleSort import bubble_sort
from QuickSort import quick_sort
from MergeSort import merge_sort
# used to generate random test data and measure run times
import random
import time
def generate_results(test_list, total_time, sort_type):
"""
Takes the information from the test functions and builds the results
into a string for readability
:param test_list: Python list that is ideally sorted
:param total_time: Time object that is total time of the sort
    :param sort_type: String naming the sort that was run to get the result
"""
# create an empty string
result_str = ""
# add the appropriate string based on if the list is sorted
if test_list == sorted(test_list):
result_str += "Test: Successful\t"
else:
result_str += "Test: Fail\t"
# build the final string with the sort type given
result_str += "{} sort time: {:5f} seconds".format(sort_type, total_time)
return result_str
def test_bubble(user_int):
# build the test list
test_list = [i for i in range(user_int)]
random.shuffle(test_list)
# time tracking of the sort
start_time = time.time()
bubble_sort(test_list)
final_time = time.time()
# generate and print results
total_time = final_time - start_time
result_str = generate_results(test_list, total_time, " Bubble")
print(result_str)
return None
def test_insertion(user_int):
# build the test list
test_list = [i for i in range(user_int)]
random.shuffle(test_list)
# time tracking of the sort
start_time = time.time()
insertion_sort(test_list, 0, len(test_list) - 1)
final_time = time.time()
# generate and print results
total_time = final_time - start_time
result_str = generate_results(test_list, total_time, "Insertion")
print(result_str)
return None
def test_selection(user_int):
# build the test list
test_list = [i for i in range(user_int)]
random.shuffle(test_list)
# time tracking of the sort
start_time = time.time()
selection_sort(test_list)
final_time = time.time()
# generate and print results
total_time = final_time - start_time
result_str = generate_results(test_list, total_time, "Selection")
print(result_str)
return None
def test_quick(user_int):
# build the test list
test_list = [i for i in range(user_int)]
random.shuffle(test_list)
# time tracking of the sort
start_time = time.time()
quick_sort(test_list, 0, len(test_list) - 1)
final_time = time.time()
# generate and print results
total_time = final_time - start_time
result_str = generate_results(test_list, total_time, " Quick")
print(result_str)
return None
def test_merge(user_int):
# build the test list
test_list = [i for i in range(user_int)]
random.shuffle(test_list)
# time tracking of the sort
start_time = time.time()
merge_sort(test_list)
final_time = time.time()
# generate and print results
total_time = final_time - start_time
result_str = generate_results(test_list, total_time, " Merge")
print(result_str)
return None
def main():
# print a warning for the user about the O(n^2) algorithms
warning_str = """
The first 3 sorts in this program (bubble, insertion, and selection)
will take a significant amount of time if you input something greater
than 20,000.
"""
print(warning_str)
# take input for the size
try:
user_int = int(input("\nInput the size of the list to be generated: "))
if user_int < 0:
user_int *= -1
except ValueError:
# sets a default size as exception handling
user_int = 1000
# run the test suite
print("\n")
test_bubble(user_int)
test_insertion(user_int)
test_selection(user_int)
test_quick(user_int)
test_merge(user_int)
print("\n")
return None
if __name__ == "__main__":
main()
| 23.084746 | 79 | 0.679148 |
115bbe7e5ca5952a8bc3797ada001c2c9855137e | 17,668 | py | Python | serv.py | pindos11/2017_17-01_project | 725f334594c609aa57b31c3bc55ccbe4fec14dc5 | ["MIT"] | null | null | null | serv.py | pindos11/2017_17-01_project | 725f334594c609aa57b31c3bc55ccbe4fec14dc5 | ["MIT"] | 1 | 2017-12-27T17:28:17.000Z | 2017-12-27T17:28:17.000Z | serv.py | pindos11/2017_17-01_project | 725f334594c609aa57b31c3bc55ccbe4fec14dc5 | ["MIT"] | null | null | null
from multiprocessing import Process
import socket,time,sqlite3,time,os,random
try:
import serv_settings
except:
f = open("serv_settings.py","w")
t = '''
IP_ADDRESS = ""
PORT = 9090
KEY_DECAY_TIME = 3600
ONLINE_CONFIRM_TIME = 600
LOGGING = 1
LOGFOLDER = "logs"
MAX_CLIENTS = 100
'''
f.write(t)
f.close()
import serv_settings
def error_p(errmsg):
print("Error: "+errmsg)
def logmsg(logmsg):
a = time.strftime("%H-%M %d %h %Y")
print(a+": "+logmsg)
def add_nulls(dlen,data):
to_ret = data
if(len(data)<dlen):
dif = dlen-len(data)
to_ret = "0"*dif+to_ret
return to_ret
class logging_db:
def __init__(self):
cur,connection = self.connect_to_db()
cur.execute('''
CREATE TABLE IF NOT EXISTS errlogs(
ADDR TEXT,
DATE TEXT,
ERROR TEXT)
''')
cur.execute('''
CREATE TABLE IF NOT EXISTS traffics(
ADDR TEXT,
TYPE TEXT,
AMOUNT INTEGER)
''')#amount in bytes
connection.commit()
connection.close()
        # TODO:
        # - implement traffic counting
        # - implement error logging here
        # - count traffic by date and user
        # - wipe these tables periodically
def add_traffic(self,addr,ttype,amount):
cur,connection = self.connect_to_db()
cur.execute("SELECT AMOUNT FROM traffics WHERE ADDR = ? AND TYPE = ?",(addr,ttype))
d = cur.fetchone()
if(d!=None):
num = d[0]
num+=amount
cur.execute("UPDATE traffics SET AMOUNT = ? WHERE ADDR = ? AND TYPE = ?",(num,addr,ttype))
else:
cur.execute("INSERT INTO traffics VALUES (?,?,?)",(addr,ttype,amount))
connection.commit()
connection.close()
def add_error(self,addr,error):
date = time.time()
cur,connection = self.connect_to_db()
cur.execute("INSERT INTO errlogs VALUES (?,?,?)",(addr,str(date),error))
connection.commit()
connection.close()
def connect_to_db(self):
try:
connection = sqlite3.connect(serv_settings.LOGFOLDER+"/"+"log.db")
except:
os.mkdir(serv_settings.LOGFOLDER)
connection = sqlite3.connect(serv_settings.LOGFOLDER+"/"+"log.db")
cur = connection.cursor()
return(cur,connection)
class dbwork:
#1)login,password,id - logindata
#2)id,key,time_to_change_key - keys
#3)id,time_to_offline - onlines
#4)id,unread_messages - messages
def __init__(self):
cur,connection = self.connect_to_db()
#creating tables
cur.execute('''
CREATE TABLE IF NOT EXISTS logindata(
ID INTEGER PRIMARY KEY,
LOGIN TEXT,
PASSWORD TEXT)
''')
cur.execute('''
CREATE TABLE IF NOT EXISTS keys(
ID INTEGER,
KEY INTEGER,
DTIME INTEGER)
''')
cur.execute('''
CREATE TABLE IF NOT EXISTS onlines(
ID INTEGER PRIMARY KEY,
OTIME INTEGER)
''')
cur.execute('''
CREATE TABLE IF NOT EXISTS messages(
ID INTEGER,
MESSAGE BLOB)
''')#ID of the user to recive message
connection.commit()
connection.close()
def connect_to_db(self):
connection = sqlite3.connect("chatdb.db")
cur = connection.cursor()
return(cur,connection)
def generate_key(self,ID): #generates a new key for given ID
random.seed()
cur,connection = self.connect_to_db()
key = random.randint(10000000,99999999)
ok = 0
while(ok==0): #generating a unique key for messaging
cur.execute("SELECT * FROM keys WHERE KEY = ?",(key,))
if(cur.fetchone()==None):
ok = 1
break
else:
key = random.randint(10000000,99999999)
cur.execute("SELECT * FROM keys WHERE ID = ?",(ID,)) #checking if the
dtime = time.time()+serv_settings.KEY_DECAY_TIME #ID in table
if(cur.fetchone()==None):
cur.execute("INSERT INTO keys VALUES (?,?,?)",(ID,key,dtime))
else:
cur.execute("UPDATE keys SET KEY = ?, DTIME = ? WHERE ID = ?",(key,dtime,ID))
connection.commit()
connection.close()
return(key)
def get_messages(self,ID):
cur,connection = self.connect_to_db()
cur.execute("SELECT MESSAGE FROM messages WHERE ID = ?",(ID,))
msgs = cur.fetchall()
msgdata = b""
if(msgs==[]):
connection.close()
return(0)
else:
for msg in msgs:
message = msg[0]
msgdata+=message
cur.execute("DELETE FROM messages WHERE ID = ?",(ID,))
connection.commit()
connection.close()
return(msgdata)
def get_ID_by_login(self,login):
cur,connection = self.connect_to_db()
cur.execute("SELECT ID FROM logindata WHERE LOGIN = ?",(login,))
ID = cur.fetchone()[0]
connection.close()
return(ID)
def get_key(self,ID): #returns a key for given ID
cur,connection = self.connect_to_db()
cur.execute("SELECT KEY FROM keys WHERE ID = ?",(ID,))
key = cur.fetchone()
if(key!=None):
key=key[0]
connection.close()
return(key)
def get_key_dtime(self,ID):
cur,connection = self.connect_to_db()
cur.execute("SELECT DTIME FROM keys WHERE ID = ?",(ID,))
dtime = cur.fetchone()
if(dtime!=None):
dtime=dtime[0]
connection.close()
return(dtime)
def get_ID_by_key(self,key):
cur,connection = self.connect_to_db()
cur.execute("SELECT ID, DTIME FROM keys WHERE key = ?",(key,))
uid = cur.fetchone()
connection.close()
if(uid==None):
return(-1)#no such key
else:
dtime = uid[1]
uid = uid[0]
if(dtime<time.time()):
return(-2)#timed out key
else:
return(uid)
def update_user_online(self,ID):
otime = time.time()+serv_settings.ONLINE_CONFIRM_TIME
cur,connection = self.connect_to_db()
cur.execute("REPLACE INTO onlines VALUES(?,?)",(ID,otime))
connection.commit()
connection.close()
def get_users_online(self):
ctime = time.time()
cur,connection = self.connect_to_db()
cur.execute("SELECT ID FROM onlines WHERE OTIME > ?",(ctime,))
		onlineIDs = cur.fetchall()
		connection.close()
		onlines = []
		for oid in onlineIDs:
			onlines.append(oid[0])
		return(onlines)
def add_message(self,ID,msg):
cur,connection = self.connect_to_db()
cur.execute("INSERT INTO messages VALUES(?,?)",(ID,msg))
connection.commit()
connection.close()
def login(self,login,password):
cur,connection = self.connect_to_db()
cur.execute("SELECT * FROM logindata WHERE LOGIN = ?",(login,))
udata = cur.fetchone()
if(udata==None):
cur.execute("INSERT INTO logindata VALUES (NULL,?,?)",(login,password))
connection.commit()
connection.close()
ID = self.get_ID_by_login(login)
key = self.generate_key(ID)
self.update_user_online(ID)
return([0,key]) #OK - new registered
else:
if(udata[2]==password):
connection.close()
ID = self.get_ID_by_login(login)
key = self.generate_key(ID)
self.update_user_online(ID)
return([0,key]) #OK - ok login&pwd
else:
connection.close()
				return([1,0]) #login exists but the password is incorrect
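#hedged protocol summary, reconstructed from the handlers in client_job below:
# 1) the client sends an 11-byte initiation: "CHK"+8-digit key (key check) or
#    "MES"+4-digit protocol number+4-digit payload length (announcing a request of that many bytes)
# 2) for "MES" the server answers "OK0000" and then expects a payload that starts with a
#    3-letter type ("LOG", "UPD", "WOL" or "MSG") and ends with "FIN"
# 3) replies are "OK"+fixed-width fields on success or "BA"+an error code on failure
#error codes used below: 1 encoding failure, 2 unknown initiation, 3 badly structured message,
#4 protocol mismatch, 5 wrong key, 6 key timed out, 7 internal error, 8 wrong password,
#9 unknown target user, 10 no queued messages, 11 nobody online
#
#hedged sketch (not called anywhere) of building a protocol-1 login request on the client side;
#it assumes add_nulls(width,s) pads s to the given width, as the fixed-width fields below suggest
def _example_login_request(login="alice",password="secret"):
	body = "LOG"+add_nulls(4,str(len(login)))+add_nulls(4,str(len(password)))+login+password+"FIN"
	initiation = "MES"+add_nulls(4,"1")+add_nulls(4,str(len(body.encode("utf-8"))))
	return(initiation,body)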
class client_job:
def send_close(self,data):
self.send_msg(data)
self.conn.close()
	def send_msg(self,data):
		#accept both str and bytes, send everything and count the outgoing traffic
		if(isinstance(data,str)):
			data = data.encode("utf-8")
		try:
			self.conn.sendall(data)
		except:
			self.write_log("error sending to: "+str(self.addr))
			return
		self.count_traffic(len(data),"out")
def answer_ask_chk(self):
self.ID = self.database.get_ID_by_key(self.key)
if(self.ID>0):
dtime = self.database.get_key_dtime(self.ID)
			dtime-=time.time()
			dtime = add_nulls(4,str(int(dtime))) #whole seconds left, padded to the 4-character field
self.database.update_user_online(self.ID)
self.send_msg("OK"+dtime)
return(-1)
else:
if(self.ID==-1):
self.error = 5 #wrong key
else:
self.error = 6 #key timed out
return(0)
def read_ask_msg(self,data):
try:
ask_m = data.decode("utf-8")
except:
self.error = 1 #encoding failure
return(0)
if(len(ask_m)!=11):
self.error = 3 #message badly structured
return(0)
mtype = ask_m[:3]
if(mtype=="CHK"):
try:
self.key = int(ask_m[3:])
except:
self.error = 5
return(0)
return self.answer_ask_chk()
if(mtype=="MES"):
try:
self.protocol = int(ask_m[3:7])
ret_bytes = int(ask_m[7:])
except:
self.error = 1
return(0)
return ret_bytes
else:
self.error = 2 #unknown initiation
return(0)
def check_key_ID(self):
realID = self.database.get_ID_by_key(self.key)
if(realID==self.ID):
return(1)
else:
if(realID==-1):
self.error = 5
return(0)
else:
self.error = 6
return(0)
def process_message(self,trg_ID,message):
if(self.check_key_ID()==1):
if(self.database.get_key(trg_ID)==None):
self.error = 9
return(0)
else:
msglen = str(add_nulls(4,str(len(message)))).encode("utf-8")
sender = str(add_nulls(8,str(self.ID))).encode("utf-8")
dbmsg = sender+msglen+message #add a sender's ID and msglen
self.database.add_message(trg_ID,dbmsg)
self.database.update_user_online(self.ID)
msg = "OK"+add_nulls(16,"")
self.send_msg(msg)
return(-1)
else:
self.error = 5
return(0)
def read_msg(self,data,len_m):
if(len(data)!=len_m):
self.error = 3
return(0)
try:
mtype = data[:3].decode("utf-8")
except:
self.error = 1
return(0)
if(data[-3:].decode("utf-8")!="FIN"):
self.error = 3
return(0)
		if(mtype=="MSG"):#the message payload may not be valid utf-8, so handle MSG before decoding the whole packet
if(self.protocol==1):
try:
self.ID = int(data[3:11].decode("utf-8"))
self.key = int(data[11:19].decode("utf-8"))
trg_ID = int(data[19:27].decode("utf-8"))
except:
self.error = 3
return(0)
msg = data[27:-3]
return(self.process_message(trg_ID,msg))
else:
self.error = 4 #protocol mismatch
return(0)
try:
cl_data = data.decode("utf-8")
except:
self.error = 1
return(0)
if(cl_data[-3:]!="FIN"):
self.error = 3
return(0)
mtype = cl_data[:3]
if(mtype=="LOG"):
if(self.protocol==1):
try:
llen = int(cl_data[3:7])
plen = int(cl_data[7:11])
except:
self.error = 3
return(0)
self.login = cl_data[11:11+llen]
self.password = cl_data[11+llen:11+llen+plen]
result = self.database.login(self.login,self.password)
if(result[0]==0):
self.key = result[1]
self.ID = self.database.get_ID_by_key(self.key)
if(self.ID<0):
							self.error = 7 #internal error, should not happen
return(0)
else:
msg = "OK"+add_nulls(8,str(self.key))+add_nulls(8,str(self.ID))
self.send_msg(msg)
return(-1)
else:
self.error = 8 #wrong password for existing login
return(0)
else:
self.error = 4
return(0)
if(mtype=="UPD"):
if(self.protocol==1):
try:
self.key = int(cl_data[3:11])
except:
self.error = 5
return(0)
self.ID = self.database.get_ID_by_key(self.key)
if(self.ID>0):
msgdata = self.database.get_messages(self.ID)
if(msgdata==0):
self.error = 10
return(0)
else:
msg = "OK"+add_nulls(16,str(len(msgdata)))
self.database.update_user_online(self.ID)
self.send_msg(msg)
self.send_msg(msgdata)
return(-1)
else:
self.error = 5
return(0)
else:
self.error = 4
return(0)
if(mtype=="WOL"):
if(self.protocol==1):
onlines = self.database.get_users_online()
if(onlines==[]):
self.error = 11
return(0)
outmsg = ""
for oid in onlines:
outmsg+=add_nulls(8,str(oid))
outmsg = outmsg.encode("utf-8")
lenmsg = len(outmsg)
msg = "OK"+add_nulls(16,str(lenmsg))
self.send_msg(msg)
self.send_msg(outmsg)
return(-1)
else:
self.error = 4
return(0)
def write_log(self,errtext):
if(serv_settings.LOGGING==1 and self.error!=10 and self.error!=11):
self.log.add_error(self.addr[0],errtext)
outstr="Error ID: "+str(self.error)+" "
outstr+="Error text: "+errtext+"\n"
print(outstr)
def count_traffic(self,num,traffic_type):
if(serv_settings.LOGGING==1):
self.log.add_traffic(self.addr[0],traffic_type,num)
def work_with_client(self,conn,addr):
self.log = logging_db()
self.database = dbwork()
self.conn = conn
self.addr = addr
self.ID = ""
self.key = ""
self.login = ""
self.password = ""
self.protocol = 0
self.error = 0 #zero is for unknown error
		try:
			data = self.conn.recv(11) #the initiation message is always 11 bytes
		except:
			self.write_log("error receiving from: "+str(self.addr))
			self.conn.close()
			return
to_recieve = self.read_ask_msg(data)
if(to_recieve==0):
bmsg = "BA"+add_nulls(4,str(self.error))
self.send_close(bmsg.encode("utf-8"))
self.write_log("initiation bad from: "+str(self.addr))
return
elif(to_recieve==-1):
self.conn.close()
return
else:
self.send_msg("OK0000".encode("utf-8"))
data = conn.recv(to_recieve)
to_recieve = self.read_msg(data,to_recieve)
if(to_recieve==0):
bmsg = "BA"+add_nulls(16,str(self.error))
self.send_close(bmsg.encode("utf-8"))
				self.write_log("bad message from: "+str(self.addr))
else:
self.conn.close()
#note: multiprocessing does not work under IDLE - start the server by running this file directly
#(e.g. by double-clicking the .py file or launching it from a terminal)
def start_process(conne,addre):
job = client_job()
job.work_with_client(conne,addre)
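#hedged sketch (not called anywhere) of a minimal client for the protocol handled above: it sends
#the 11-byte initiation, waits for the "OK0000" go-ahead and then sends a body that ends with "FIN";
#the address and port come from serv_settings, just like the listening socket below
def _example_client(initiation,body):
	cl = socket.socket()
	cl.connect((serv_settings.IP_ADDRESS, serv_settings.PORT))
	cl.send(initiation.encode("utf-8")) #e.g. the pair returned by _example_login_request()
	answer = cl.recv(6)
	if(answer==b"OK0000"):
		cl.send(body.encode("utf-8"))
		answer = cl.recv(1024) #"OK"+fields on success, "BA"+error code otherwise
	cl.close()
	return(answer)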
if(__name__=="__main__"):
sock = socket.socket()
sock.bind((serv_settings.IP_ADDRESS, serv_settings.PORT))
sock.listen(serv_settings.MAX_CLIENTS)
while True:
conn, addr = sock.accept()
answ = Process(target=start_process,args=(conn,addr))
		answ.daemon = True
		answ.start() #no join here: joining would block the accept loop and serve clients one at a time
| 33.462121
| 103
| 0.482001
|