content stringlengths 5 1.05M |
|---|
import fiftyone.zoo as foz
import what.utils.logger as log
import torch
from torch.utils.data import DataLoader
from what.models.detection.datasets.fiftyone import FiftyOneDataset
from what.models.detection.datasets.voc import VOCDataset
from what.models.detection.ssd.ssd.ssd import MatchPrior
from what.models.detection.ssd.ssd.preprocessing import TrainAugmentation, TestTransform
from what.models.detection.ssd.ssd import mobilenet_ssd_config
from what.models.detection.ssd.mobilenet_v2_ssd_lite import MobileNetV2SSDLite
# Train on GPU when available; cuDNN benchmarking autotunes kernels, which
# pays off because SSD inputs have a fixed size.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    torch.backends.cudnn.benchmark = True

logger = log.get_logger(__name__)

if __name__ == '__main__':
    pretrained_ssd = None  # or use "models/mobilenet-v2-ssd-lite-mp-0_686.pth"
    checkpoint_folder = "checkpoint/"
    batch_size = 32
    num_workers = 0

    # Load MobileNetSSD configuration (priors, input size, normalization).
    config = mobilenet_ssd_config
    train_transform = TrainAugmentation(config.image_size, config.image_mean, config.image_std)
    target_transform = MatchPrior(config.priors, config.center_variance, config.size_variance, 0.5)
    test_transform = TestTransform(config.image_size, config.image_mean, config.image_std)

    # Visualize the VOC2012 dataset:
    # session = fo.launch_app(foz.load_zoo_dataset("voc-2012", split="train"))
    # session.wait()

    # Training dataset loaded through FiftyOne (VOC2012 train split).
    train_dataset = FiftyOneDataset(foz.load_zoo_dataset("voc-2012", split="train"),
                                    foz.load_zoo_dataset_info("voc-2012").classes,
                                    transform=train_transform,
                                    target_transform=target_transform)
    # Alternative: load the training dataset from local disk:
    # train_dataset = VOCDataset("VOC2012/", transform=train_transform,
    #                            target_transform=target_transform)
    train_loader = DataLoader(train_dataset, batch_size,
                              num_workers=num_workers,
                              shuffle=True)
    logger.info("Train dataset size: {}".format(len(train_dataset)))

    # Validation dataset from FiftyOne (uses voc-2007 train as validation here).
    val_dataset = FiftyOneDataset(foz.load_zoo_dataset("voc-2007", split="train"),
                                  foz.load_zoo_dataset_info("voc-2007").classes,
                                  transform=test_transform,
                                  target_transform=target_transform)
    # Alternative: load the validation dataset from local disk:
    # val_dataset = VOCDataset("VOC2007/", transform=test_transform,
    #                          target_transform=target_transform, is_test=True)
    val_loader = DataLoader(val_dataset, batch_size,
                            num_workers=num_workers,
                            shuffle=False)
    logger.info("validation dataset size: {}".format(len(val_dataset)))

    # Create the SSD network and run training (loads pretrained base net inside).
    model = MobileNetV2SSDLite(is_test=False, class_names=train_dataset.classes)
    model.train(train_loader, val_loader, device=device, num_epochs=5, debug_steps=10, validation_epochs=1,
                pretrained_ssd=pretrained_ssd, checkpoint_folder=checkpoint_folder)
|
from conans import ConanFile, CMake, tools
import os
required_conan_version = ">=1.43.0"
class GhcFilesystemRecipe(ConanFile):
    """Conan recipe packaging gulrak/filesystem, a header-only
    std::filesystem-compatible helper library.
    """
    name = "ghc-filesystem"
    description = "A header-only single-file std::filesystem compatible helper library"
    topics = ("ghc-filesystem", "header-only", "filesystem")
    homepage = "https://github.com/gulrak/filesystem"
    url = "https://github.com/conan-io/conan-center-index"
    license = "MIT"
    settings = "os", "arch", "compiler", "build_type"
    # Header-only: the sources never need to be copied into a per-build folder.
    no_copy_source = True

    @property
    def _source_subfolder(self):
        # Conventional subfolder where the upstream sources are unpacked.
        return "source_subfolder"

    def package_id(self):
        # Header-only -> a single package id regardless of settings/options.
        self.info.header_only()

    def source(self):
        # Download and unpack the release archive declared in conandata.yml.
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)

    def package(self):
        self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
        # Run the upstream CMake install to lay out the headers/config files;
        # tests and examples are skipped since only headers are shipped.
        cmake = CMake(self)
        cmake.definitions["GHC_FILESYSTEM_BUILD_TESTING"] = False
        cmake.definitions["GHC_FILESYSTEM_BUILD_EXAMPLES"] = False
        cmake.definitions["GHC_FILESYSTEM_WITH_INSTALL"] = True
        cmake.configure(source_folder=self._source_subfolder)
        cmake.install()
        # Drop the lib dir created by install; a header-only package has no libs.
        tools.rmdir(os.path.join(self.package_folder, "lib"))

    def package_info(self):
        # Conan v2 style properties for the CMakeDeps generator.
        self.cpp_info.set_property("cmake_file_name", "ghc_filesystem")
        self.cpp_info.set_property("cmake_target_name", "ghcFilesystem::ghc_filesystem")
        # TODO: back to global scope in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.components["ghc_filesystem"].bindirs = []
        self.cpp_info.components["ghc_filesystem"].frameworkdirs = []
        self.cpp_info.components["ghc_filesystem"].libdirs = []
        self.cpp_info.components["ghc_filesystem"].resdirs = []
        # TODO: to remove in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.filenames["cmake_find_package"] = "ghc_filesystem"
        self.cpp_info.filenames["cmake_find_package_multi"] = "ghc_filesystem"
        self.cpp_info.names["cmake_find_package"] = "ghcFilesystem"
        self.cpp_info.names["cmake_find_package_multi"] = "ghcFilesystem"
        self.cpp_info.components["ghc_filesystem"].names["cmake_find_package"] = "ghc_filesystem"
        self.cpp_info.components["ghc_filesystem"].names["cmake_find_package_multi"] = "ghc_filesystem"
        self.cpp_info.components["ghc_filesystem"].set_property("cmake_target_name", "ghcFilesystem::ghc_filesystem")
|
import os
import png
from PIL import Image
class Printer(object):
    """Callable printer that can be globally muted via its ``on`` flag.

    Calling an instance formats and prints a message, then passes the first
    formatting argument back to the caller so calls can be chained inline.
    """

    def __init__(self):
        # Printing is enabled by default; set ``on = False`` to silence.
        self.on = True

    def __call__(self, message, *args):
        """Print ``message.format(*args)`` when enabled and return ``args[0]``."""
        if self.on:
            print(message.format(*args))
        return args[0]
##################################
# IO THPS Scene Image Correction #
##################################
def shift_row_right(row, amount):
    """Rotate a flat RGBA row right by *amount* pixels (4 values per pixel).

    A negative *amount* rotates left; ``amount == 0`` returns a copy.
    """
    split = amount * -4
    return row[split:] + row[:split]
def shift_rows_down(image, amount):
    """Rotate the rows of *image* down by *amount*, wrapping bottom to top.

    Returns a new list; the input list is left untouched.
    """
    rows = image.copy()
    for _ in range(amount):
        # Move the last row to the front, one step at a time.
        rows = [rows[-1]] + rows[:-1]
    return rows
def shift_col_down(image, col, amount, height):
    """Rotate pixel column *col* of *image* by *amount* (4 RGBA values/pixel).

    Builds and returns a new image; rows other than column *col* are copied
    through unchanged.
    """
    start = col * 4
    # Gather the column's RGBA values into one flat list, top to bottom.
    flat = []
    for r in range(height):
        flat.extend(image[r][start:start + 4])
    # Rotate the flattened column (same rotation shift_row_right performs).
    split = amount * -4
    rotated = flat[split:] + flat[:split]
    # Stitch each row back together with the rotated column spliced in.
    result = []
    for r in range(height):
        row = list(image[r][:start])
        row.extend(rotated[r * 4:r * 4 + 4])
        row.extend(image[r][start + 4:])
        result.append(row)
    return result
def fix_pixel_data(width, height, pixels):
    """Reassemble raw pixel tuples into corrected scanlines.

    Each scanline is built with its pixels reversed, rotated one pixel right,
    the whole image rotated one row down, and column 0 rotated one pixel up
    (``amount=-1``) — compensating for how this format stores scene images.

    :param width: image width in pixels
    :param height: image height in pixels
    :param pixels: flat sequence of per-pixel value tuples (e.g. RGBA)
    :returns: list of flattened scanlines (lists of channel values)
    """
    initial_image = []
    # FIX: the original inner loop reused ``i`` as its index, shadowing the
    # outer row index — it only worked because ``for`` reassigns the name
    # each iteration. Distinct names make the intent explicit.
    for row in range(height):
        cur_row = []
        # Pixels within a row are stored right-to-left; walk them reversed.
        for idx in reversed(range(row * width, (row + 1) * width)):
            cur_row.extend(pixels[idx])
        initial_image.append(shift_row_right(cur_row, 1))
    shifted_down = shift_rows_down(initial_image, 1)
    return shift_col_down(shifted_down, 0, -1, height)
def write_to_png(ui, filename, pvr, pixels):
    """Write decoded PVR pixel data out as a PNG, applying format fix-ups.

    :param ui: UI/state object; ``files_extracted`` is incremented and
        ``output_dir``/``create_sub_dirs`` control where the file goes
    :param filename: source file name; its extension is stripped
    :param pvr: texture header object (width, height, pal_size, palette,
        header_offset)
    :param pixels: decoded pixel rows for ``png.Writer``
    :returns: None (writes the PNG to disk as a side effect)
    """
    postprocess = False
    final_image = pixels
    ui.files_extracted += 1
    filename_without_extension = "".join(filename.split(".")[0:-1])
    if ui.create_sub_dirs:
        output_dir = os.path.join(ui.output_dir, filename_without_extension)
    else:
        output_dir = ui.output_dir
    # Embed the header offset in the name so multiple textures from one file
    # do not collide.
    output_path = os.path.join(output_dir, f"{filename_without_extension}_{pvr.header_offset:#0{8}x}.png")
    if pvr.pal_size != 65536:
        final_image = fix_pixel_data(pvr.width, pvr.height, pixels)
    elif (pvr.palette & 0xFF00) in [0x100, 0xd00]:
        # These palette variants come out rotated/mirrored; fix after writing.
        postprocess = True
    elif (pvr.palette & 0xFF00) == 0x400:
        output_path = output_path[0:-4] + "_i" + output_path[-4:]  # Mark unsupported textures with _i
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    # FIX: use a context manager so the handle is closed even if the PNG
    # writer raises mid-write (the original leaked the handle on error).
    with open(output_path, 'wb') as file:
        writer = png.Writer(pvr.width, pvr.height, greyscale=False, alpha=True)
        writer.write(file, final_image)
    if postprocess:
        texture = Image.open(output_path)
        out = texture.rotate(270, expand=True)
        out = out.transpose(Image.FLIP_LEFT_RIGHT)
        out.save(output_path)
|
# Fetch the production DATABASE_URL from Heroku and print psql credentials.
import subprocess

import dj_database_url  # FIX: imports hoisted to the top of the file (PEP 8)

CMD = "heroku config:get DATABASE_URL -a benzak"
RESULT = subprocess.run(CMD.split(" "), capture_output=True)
DATABASE_URL = RESULT.stdout.decode().strip()

# Parse the URL into Django-style connection settings.
P = dj_database_url.parse(DATABASE_URL)

# Show the password separately (psql prompts for it), then a ready-made command.
print(f"\n\n<><><><><><><><>\n{P['PASSWORD']}\n<><><><><><><><>\n\n")
print(f"psql -h {P['HOST']} -p {P['PORT']} -d {P['NAME']} -U {P['USER']} -W")
|
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from datacube.api import API
def test_get_descriptor_no_data():
    """get_descriptor() yields an empty dict when the index has no storage."""
    # FIX: prefer the stdlib unittest.mock (Python 3); fall back to the
    # third-party ``mock`` backport only on Python 2.
    try:
        from unittest.mock import MagicMock
    except ImportError:  # pragma: no cover - Python 2 fallback
        from mock import MagicMock
    mock_index = MagicMock()
    api = API(index=mock_index)
    descriptor = api.get_descriptor({})
    assert descriptor == {}
def test_get_descriptor_some_data():
    """get_descriptor() with one mocked storage unit still returns {}."""
    # FIX: prefer the stdlib unittest.mock (Python 3); fall back to the
    # third-party ``mock`` backport only on Python 2.
    try:
        from unittest.mock import MagicMock
    except ImportError:  # pragma: no cover - Python 2 fallback
        from mock import MagicMock
    band_10 = MagicMock(dtype='int16', )
    my_dict = {'band10': band_10}

    def getitem(name):
        return my_dict[name]

    def setitem(name, val):
        my_dict[name] = val

    # Back the measurements mock with a real dict so item access round-trips.
    mock_measurements = MagicMock()
    mock_measurements.__getitem__.side_effect = getitem
    mock_measurements.__setitem__.side_effect = setitem

    su = MagicMock()
    su.storage_type.dimensions.return_value = ['t', 'x', 'y']
    su.storage_type.measurements = mock_measurements
    # NOTE(review): the two bare attribute accesses below are no-ops on a
    # MagicMock (they merely auto-create child mocks) — confirm they are
    # intentional before removing.
    su.coordinates.items
    su.storage_type.name
    su.variables.values.return_value = ['t', 'x', 'y']
    mock_index = MagicMock()
    # mock_index.datasets.get_fields.return_value = dict(product=None)
    mock_index.storage.search.return_value = [su]
    api = API(index=mock_index)
    descriptor = api.get_descriptor({})
    assert descriptor == {}
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import InputRequired, Email, Length
class LoginForm(FlaskForm):
    """Login form with a username (rendered as an email input), a password,
    and a "remember me" checkbox.

    The ``render_kw`` dicts carry Bootstrap classes and HTML attributes
    straight through to the rendered ``<input>`` elements.
    """
    # NOTE(review): the placeholder says "Email Address" and the input type is
    # "email", but the imported Email() validator is not applied — confirm
    # whether server-side email validation was intended here.
    username = StringField('username', validators=[InputRequired(), Length(min=4, max=80)],render_kw={"type":"email", "class":"form-control", "id":"inputEmail", "placeholder":"Email Address"})
    password = PasswordField('password', validators=[InputRequired(), Length(min=2, max=80)],render_kw={"type":"password", "class":"form-control", "id":"inputPassword","placeholder":"Password"})
    remember = BooleanField('remember me')
|
from django.test import TestCase
from django.db import models
from app.tests.factories.todos import TodoFactory
class TodoModelTest(TestCase):
    """
    This class defines the test suite for the todo model.
    """
    # Shared (unsaved) Todo instance built once for the whole suite.
    todo = None

    @classmethod
    def setUpTestData(cls):
        # Set up non-modified objects used by all test methods.
        # FIX: use the ``cls`` argument instead of hard-coding the class name,
        # so the method keeps working under subclassing/renames.
        cls.todo = TodoFactory.build()

    def test_title_field(self):
        """title is a required CharField capped at 100 characters."""
        field = self.todo._meta.get_field("title")
        self.assertIsInstance(field, models.CharField)
        self.assertEqual(field.max_length, 100)
        self.assertFalse(field.blank)

    def test_owner_field(self):
        """owner is an optional (blank/null) CharField capped at 100 chars."""
        field = self.todo._meta.get_field("owner")
        self.assertIsInstance(field, models.CharField)
        self.assertEqual(field.max_length, 100)
        self.assertTrue(field.blank)
        self.assertTrue(field.null)

    def test_created_field(self):
        """created is stamped automatically on insert (auto_now_add)."""
        field = self.todo._meta.get_field("created")
        self.assertIsInstance(field, models.DateTimeField)
        self.assertTrue(field.auto_now_add)

    def test_modified_field(self):
        """modified is stamped automatically on every save (auto_now)."""
        field = self.todo._meta.get_field("modified")
        self.assertIsInstance(field, models.DateTimeField)
        self.assertTrue(field.auto_now)

    def test_model_returns_readable_representation(self):
        """Test a readable string is returned for the model instance."""
        self.assertEqual(str(self.todo), self.todo.title)
|
# Generated by Django 3.0.6 on 2020-06-23 12:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Stars_Votes, Notifications and Followers tables, each
    linked (nullable, cascade-deleted) to the project's user model.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('My_Account', '0024_auto_20200622_1517'),
    ]

    operations = [
        # Star/vote records: both sides point at users; Vote is one-to-one.
        migrations.CreateModel(
            name='Stars_Votes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Star', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Star', to=settings.AUTH_USER_MODEL)),
                ('Vote', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Vote', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Per-user notification entries (type/text/status are free-form strings).
        migrations.CreateModel(
            name='Notifications',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Notification_Type', models.CharField(max_length=64)),
                ('Notification_Text', models.CharField(max_length=240)),
                ('Notification_Status', models.CharField(max_length=32)),
                ('username', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Follower edges between users; is_Friend marks a mutual follow.
        migrations.CreateModel(
            name='Followers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_Friend', models.BooleanField(default=False)),
                ('Follower', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Follower', to=settings.AUTH_USER_MODEL)),
                ('username', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='Username', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import backtrader as bt
import numpy as np
from scipy.stats import linregress
diff_her = 90
class TestStrategy(bt.Strategy):
    """Demo strategy: buy after two consecutive down closes, exit five bars
    after the entry fill. One order at a time.
    """

    def log(self, txt, dt=None):
        '''Logging function for this strategy'''
        dt = dt or self.datas[0].datetime.date(0)
        print('%s, %s' % (dt.isoformat(), txt))

    def __init__(self):
        # Keep a reference to the "close" line in the data[0] dataseries
        self.dataclose = self.datas[0].close
        # To keep track of pending orders
        self.order = None

    def notify_order(self, order):
        if order.status in [order.Submitted, order.Accepted]:
            # Buy/Sell order submitted/accepted to/by broker - Nothing to do
            return
        # Check if an order has been completed
        # Attention: broker could reject order if not enough cash
        if order.status in [order.Completed]:
            if order.isbuy():
                self.log('BUY EXECUTED, %.2f' % order.executed.price)
            elif order.issell():
                self.log('SELL EXECUTED, %.2f' % order.executed.price)
            # Remember the bar on which the fill happened (exit timing below).
            self.bar_executed = len(self)
        elif order.status in [order.Canceled, order.Margin, order.Rejected]:
            self.log('Order Canceled/Margin/Rejected')
        # Write down: no pending order
        self.order = None

    def next(self):
        # Simply log the closing price of the series from the reference
        self.log('Close, %.2f' % self.dataclose[0])
        # Check if an order is pending ... if yes, we cannot send a 2nd one
        if self.order:
            return
        # Check if we are in the market
        if not self.position:
            # Not yet ... we MIGHT BUY if ...
            if self.dataclose[0] < self.dataclose[-1]:
                # current close less than previous close
                if self.dataclose[-1] < self.dataclose[-2]:
                    # previous close less than the previous close
                    # BUY, BUY, BUY!!! (with default parameters)
                    self.log('BUY CREATE, %.2f' % self.dataclose[0])
                    # Keep track of the created order to avoid a 2nd order
                    self.order = self.buy()
        else:
            # Already in the market ... we might sell
            if len(self) >= (self.bar_executed + 5):
                # SELL, SELL, SELL!!! (with all possible default parameters)
                self.log('SELL CREATE, %.2f' % self.dataclose[0])
                # Keep track of the created order to avoid a 2nd order
                self.order = self.sell()
class Momentum(bt.Indicator):
    """Regression-slope momentum: the per-bar log-price slope annualized over
    252 trading days and weighted by the regression's R-squared.
    """

    lines = ('trend',)
    params = (('period', diff_her),)

    def __init__(self):
        # Do not emit values until a full lookback window is available.
        self.addminperiod(self.params.period)

    def next(self):
        log_prices = np.log(self.data.get(size=self.p.period))
        t = np.arange(len(log_prices))
        slope, _, rvalue, _, _ = linregress(t, log_prices)
        # Annualize the slope, then scale by fit quality (R^2).
        self.lines.trend[0] = ((1 + slope) ** 252) * (rvalue ** 2)
class MainStrategy(bt.Strategy):
    """Momentum rotation: hold the top-momentum quintile of the universe
    while the benchmark (datas[0], e.g. SPY) trades above its 200-day SMA,
    sizing positions by an ATR-based risk budget.
    """

    def __init__(self):
        self.i = 0  # bar counter used to schedule the periodic rebalances
        self.inds = {}
        self.spy = self.datas[0]      # benchmark feed
        self.stocks = self.datas[1:]  # tradable universe
        self.spy_sma200 = bt.indicators.SimpleMovingAverage(self.spy.close,
                                                            period=200)
        for d in self.stocks:
            self.inds[d] = {}
            self.inds[d]["momentum"] = Momentum(d.close,
                                                period=diff_her)
            self.inds[d]["sma100"] = bt.indicators.SimpleMovingAverage(d.close, period=100)
            self.inds[d]["atr20"] = bt.indicators.ATR(d, period=20)

    def prenext(self):
        # call next() even when data is not available for all tickers
        self.next()

    def next(self):
        # Rebalance portfolio membership every 5 bars, position sizes every 10.
        if self.i % 5 == 0:
            self.rebalance_portfolio()
        if self.i % 10 == 0:
            self.rebalance_positions()
        self.i += 1

    def rebalance_portfolio(self):
        """Close stocks that fell out of the top quintile or below their
        100-day SMA, then buy the current top-momentum quintile.
        """
        # only look at data that we can have indicators for
        self.rankings = list(filter(lambda d: len(d) > 100, self.stocks))
        # FIX: sort descending so index 0 is the *strongest* momentum; the
        # original ascending sort made the sell rule (i > 20%) and the buy
        # slice ([:20%]) act on the weakest names instead.
        self.rankings.sort(key=lambda d: self.inds[d]["momentum"][0], reverse=True)
        num_stocks = len(self.rankings)
        # sell stocks based on criteria
        for i, d in enumerate(self.rankings):
            # FIX: was self.getposition(self.data), which always inspected the
            # benchmark feed; check the position of the stock being ranked.
            if self.getposition(d).size:
                if i > num_stocks * 0.2 or d < self.inds[d]["sma100"]:
                    self.close(d)
        # No new entries while the benchmark is below its 200-day SMA.
        if self.spy < self.spy_sma200:
            return
        # buy stocks with remaining cash
        for i, d in enumerate(self.rankings[:int(num_stocks * 0.2)]):
            cash = self.broker.get_cash()
            value = self.broker.get_value()
            if cash <= 0:
                break
            # FIX: was self.getposition(self.data); only buy the stock if we
            # do not already hold *it*.
            if not self.getposition(d).size:
                # Risk 0.1% of account value per ATR unit.
                # NOTE(review): atr20 is an indicator object; confirm whether
                # self.inds[d]["atr20"][0] was intended for the divisor.
                size = value * 0.001 / self.inds[d]["atr20"]
                self.buy(d, size=size)

    def rebalance_positions(self):
        """Re-target position sizes of the held quintile to the ATR budget."""
        num_stocks = len(self.rankings)
        if self.spy < self.spy_sma200:
            return
        # rebalance all stocks
        for i, d in enumerate(self.rankings[:int(num_stocks * 0.2)]):
            cash = self.broker.get_cash()
            value = self.broker.get_value()
            if cash <= 0:
                break
            size = value * 0.001 / self.inds[d]["atr20"]
            self.order_target_size(d, size)

    def notify_order(self, order):
        if order.status in [order.Submitted, order.Accepted]:
            # Buy/Sell order submitted/accepted to/by broker - Nothing to do
            return
        # Check if an order has been completed
        # Attention: broker could reject order if not enough cash
        if order.status in [order.Completed]:
            if order.isbuy():
                self.log('BUY EXECUTED, %.2f' % order.executed.price)
            elif order.issell():
                self.log('SELL EXECUTED, %.2f' % order.executed.price)
            self.bar_executed = len(self)
        elif order.status in [order.Canceled, order.Margin, order.Rejected]:
            self.log('Order Canceled/Margin/Rejected')
        # Write down: no pending order
        self.order = None

    def log(self, txt, dt=None):
        '''Logging function for this strategy'''
        dt = dt or self.datas[0].datetime.date(0)
        print('%s, %s' % (dt.isoformat(), txt))
def <caret>f(x, y):
"""
Summary.
Returns:
""" |
#!/usr/bin/python3
## -*- coding: utf-8 -*-
import learn_math
# Application metadata used for the startup banner printed by main().
AppData = {
    'TITLE': 'Learning-Python, Python Samples and Templates',
    'DESC': 'A simple python project template sample.',
    'AUTHOR': 'naturestone',
    'VERSION': '1.0.1'
}
##########################################################################
# TEST Procedure
def test():
    """Probe function used to demonstrate/verify running a module function."""
    print("Modul ist running.")
##########################################################################
# MAIN Procedure
def main():
    """Print the app banner, ask for a function name on stdin and run it.

    :returns: the return value of the chosen function, or exits with code 2
        when the name cannot be resolved to a callable
    """
    # Print the startup banner ({2} = version, {1} = author).
    print("\n{0}\nVersion: {2}\nAutor: {1}\n".format(AppData['TITLE'], AppData['AUTHOR'], AppData['VERSION']))
    # Name of the function/module entered by the user.
    load_func = input("Running module: ")
    try:
        # FIX: the original compared type(load_func) (always str) with
        # type(test()) (NoneType, while also *calling* test() as a side
        # effect), so the requested function could never run. Resolve the
        # entered name against this module and check callability instead.
        target = globals().get(load_func)
        print('Loading: {} is from type {}\n'.format(load_func, type(target)))
        if callable(target):
            # Run the resolved function and return its result.
            return target()
        print('Module not found.')
        exit(2)
    except Exception:
        # FIX: narrowed from a bare ``except`` (which also swallowed
        # SystemExit/KeyboardInterrupt).
        print('Module not found.')
        exit(2)
    finally:
        print('Complete.')
##########################################################################
# MAIN Program
if __name__ == "__main__":
    # Script entry point; importing this module does not trigger main().
    main()
from functools import partial
from PySide import QtGui, QtCore
from jukeboxcore.gui.widgets.reftrackwidget_ui import Ui_ReftrackWidget
from jukeboxcore.gui.widgets.optionselector_ui import Ui_OptionSelector
from jukeboxcore.gui.widgets.browser import ComboBoxBrowser
from jukeboxcore.gui.widgetdelegate import WidgetDelegate
from jukeboxcore.gui.main import JB_Dialog, get_icon
from jukeboxcore.gui.reftrackitemdata import REFTRACK_OBJECT_ROLE
class OptionSelector(JB_Dialog, Ui_OptionSelector):
    """Widget to select options when importing or referencing

    Shows the reftrack's options in a combo-box browser; the user's pick is
    stored in :data:`OptionSelector.selected` when the dialog is accepted.
    """

    def __init__(self, reftrack, parent=None):
        """Initialize a new OptionSelector

        :param reftrack: the reftrack to show options for
        :type reftrack: :class:`jukeboxcore.reftrack.Reftrack`
        :param parent: the parent widget
        :type parent: :class:`QtGui.QWidget`
        :raises: None
        """
        super(OptionSelector, self).__init__(parent)
        self.setupUi(self)
        # Holds the chosen TaskFileInfo once select() accepts the dialog.
        self.selected = None
        self.reftrack = reftrack
        self.setup_ui()
        self.setup_signals()
        options = reftrack.get_options()
        self.browser.set_model(options)
        # Point each browser level at the model column configured for it.
        columns = self.reftrack.get_option_columns()
        for i, c in enumerate(columns):
            self.browser.get_level(i).setModelColumn(c)
        self.adjustSize()

    def setup_ui(self, ):
        """Setup the ui

        :returns: None
        :rtype: None
        :raises: None
        """
        labels = self.reftrack.get_option_labels()
        # One combo-box level per option label.
        self.browser = ComboBoxBrowser(len(labels), headers=labels)
        self.browser_vbox.addWidget(self.browser)

    def setup_signals(self, ):
        """Connect the signals with the slots to make the ui functional

        :returns: None
        :rtype: None
        :raises: None
        """
        self.select_pb.clicked.connect(self.select)

    def select(self, ):
        """Store the selected taskfileinfo in self.selected and accept the dialog

        :returns: None
        :rtype: None
        :raises: None
        """
        # Selection of the deepest browser level determines the file choice.
        s = self.browser.selected_indexes(self.browser.get_depth()-1)
        if not s:
            return
        i = s[0].internalPointer()
        if i:
            tfi = i.internal_data()
            self.selected = tfi
            self.accept()
class ReftrackWidget(Ui_ReftrackWidget, QtGui.QFrame):
    """Widget to display Reftracks in a Widgetdelegate

    Renders one Reftrack row (text, icons, status color, action buttons) and
    forwards button clicks to the underlying Reftrack object.
    """

    def __init__(self, parent=None):
        """Initialize a new ReftrackWidget

        :param parent: widget parent
        :type parent: QtGui.QWidget
        :raises: None
        """
        super(ReftrackWidget, self).__init__(parent)
        self.setupUi(self)
        # The Reftrack currently displayed; assigned in set_index().
        self.reftrack = None
        self.setup_ui()
        self.setup_signals()
        # Remember the default background so set_top_bar_color() can fall
        # back to it when the model provides no status color.
        self.upper_fr_default_bg_color = self.upper_fr.palette().color(QtGui.QPalette.Window)

    def setup_ui(self, ):
        """Setup the ui

        :returns: None
        :rtype: None
        :raises: None
        """
        self.setup_icons()

    def setup_icons(self, ):
        """Setup the icons of the ui

        :returns: None
        :rtype: None
        :raises: None
        """
        # (icon file, tool button) pairs for every action button.
        iconbtns = [("menu_border_24x24.png", self.menu_tb),
                    ("duplicate_border_24x24.png", self.duplicate_tb),
                    ("delete_border_24x24.png", self.delete_tb),
                    ("reference_border_24x24.png", self.reference_tb),
                    ("load_border_24x24.png", self.load_tb),
                    ("unload_border_24x24.png", self.unload_tb),
                    ("replace_border_24x24.png", self.replace_tb),
                    ("import_border_24x24.png", self.importref_tb),
                    ("import_border_24x24.png", self.importtf_tb),
                    ("alien.png", self.alien_tb),
                    ("imported.png", self.imported_tb)]
        for iconname, btn in iconbtns:
            i = get_icon(iconname, asicon=True)
            btn.setIcon(i)

    def setup_signals(self, ):
        """Connect the signals with the slots to make the ui functional

        :returns: None
        :rtype: None
        :raises: None
        """
        self.duplicate_tb.clicked.connect(self.duplicate)
        self.delete_tb.clicked.connect(self.delete)
        self.load_tb.clicked.connect(self.load)
        self.unload_tb.clicked.connect(self.unload)
        self.reference_tb.clicked.connect(self.reference)
        self.importtf_tb.clicked.connect(self.import_file)
        self.importref_tb.clicked.connect(self.import_reference)
        self.replace_tb.clicked.connect(self.replace)
        # Status buttons toggle between icon-only and icon+text display.
        self.imported_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.imported_tb))
        self.alien_tb.clicked.connect(partial(self.toggle_tbstyle, button=self.alien_tb))

    def set_index(self, index):
        """Display the data of the given index

        :param index: the index to paint
        :type index: QtCore.QModelIndex
        :returns: None
        :rtype: None
        :raises: None
        """
        self.index = index
        # Column 18 carries the Reftrack object under REFTRACK_OBJECT_ROLE.
        self.reftrack = index.model().index(index.row(), 18, index.parent()).data(REFTRACK_OBJECT_ROLE)
        self.set_maintext(self.index)
        self.set_identifiertext(self.index)
        self.set_type_icon(self.index)
        self.disable_restricted()
        self.hide_restricted()
        self.set_top_bar_color(self.index)
        self.set_status_buttons()
        self.set_menu()

    def set_maintext(self, index):
        """Set the maintext_lb to display text information about the given reftrack

        :param index: the index
        :type index: :class:`QtGui.QModelIndex`
        :returns: None
        :rtype: None
        :raises: None
        """
        dr = QtCore.Qt.DisplayRole
        text = ""
        model = index.model()
        # Join the display text of the descriptive columns with " | ".
        # NOTE(review): columns 1, 2, 3, 5 and 6 presumably hold type, element,
        # task, version etc. — confirm against the reftrack item data model.
        for i in (1, 2, 3, 5, 6):
            new = model.index(index.row(), i, index.parent()).data(dr)
            if new is not None:
                text = " | ".join((text, new)) if text else new
        self.maintext_lb.setText(text)

    def set_identifiertext(self, index):
        """Set the identifier text on the identifier_lb

        :param index: the index
        :type index: :class:`QtGui.QModelIndex`
        :returns: None
        :rtype: None
        :raises: None
        """
        dr = QtCore.Qt.DisplayRole
        # Column 17 holds the zero-based identifier; show it one-based,
        # or "#-1" when there is none.
        t = index.model().index(index.row(), 17, index.parent()).data(dr)
        if t is None:
            t = -1
        else:
            t = t+1
        self.identifier_lb.setText("#%s" % t)

    def set_type_icon(self, index):
        """Set the type icon on type_icon_lb

        :param index: the index
        :type index: :class:`QtGui.QModelIndex`
        :returns: None
        :rtype: None
        :raises: None
        """
        icon = index.model().index(index.row(), 0, index.parent()).data(QtCore.Qt.DecorationRole)
        if icon:
            pix = icon.pixmap(self.type_icon_lb.size())
            self.type_icon_lb.setPixmap(pix)
        else:
            # Clear any previous icon.
            self.type_icon_lb.setPixmap(None)

    def disable_restricted(self, ):
        """Disable the restricted buttons

        :returns: None
        :rtype: None
        :raises: None
        """
        # (reftrack action, button) pairs to grey out when not permitted.
        todisable = [(self.reftrack.duplicate, self.duplicate_tb),
                     (self.reftrack.delete, self.delete_tb),
                     (self.reftrack.reference, self.reference_tb),
                     (self.reftrack.replace, self.replace_tb),]
        for action, btn in todisable:
            res = self.reftrack.is_restricted(action)
            btn.setDisabled(res)

    def hide_restricted(self, ):
        """Hide the restricted buttons

        :returns: None
        :rtype: None
        :raises: None
        """
        # Mutually exclusive button pairs: load/unload and import-file/
        # import-reference. Show the permitted one of each pair.
        tohide = [((self.reftrack.unload, self.unload_tb),
                   (self.reftrack.load, self.load_tb)),
                  ((self.reftrack.import_file, self.importtf_tb),
                   (self.reftrack.import_reference, self.importref_tb))]
        for (action1, btn1), (action2, btn2) in tohide:
            res1 = self.reftrack.is_restricted(action1)
            res2 = self.reftrack.is_restricted(action2)
            if res1 != res2:
                btn1.setEnabled(True)
                btn1.setHidden(res1)
                btn2.setHidden(res2)
            else:  # both are restricted, then show one but disable it
                btn1.setDisabled(True)
                btn1.setVisible(True)
                btn2.setVisible(False)

    def set_top_bar_color(self, index):
        """Set the color of the upper frame to the background color of the reftrack status

        :param index: the index
        :type index: :class:`QtGui.QModelIndex`
        :returns: None
        :rtype: None
        :raises: None
        """
        # NOTE(review): the status color is read from ForegroundRole of
        # column 8 but applied as a *background* color — confirm the model
        # exposes it under that role intentionally.
        dr = QtCore.Qt.ForegroundRole
        c = index.model().index(index.row(), 8, index.parent()).data(dr)
        if not c:
            c = self.upper_fr_default_bg_color
        self.upper_fr.setStyleSheet('background-color: rgb(%s, %s, %s)' % (c.red(), c.green(), c.blue()))

    def set_status_buttons(self, ):
        """Depending on the status of the reftrack, enable or disable
        the status buttons, for imported/alien status buttons

        :returns: None
        :rtype: None
        :raises: None
        """
        imported = self.reftrack.status() == self.reftrack.IMPORTED
        alien = self.reftrack.alien()
        for btn, enable in [(self.imported_tb, imported),
                            (self.alien_tb, alien)]:
            btn.setEnabled(enable)
            # Reset to icon-only; toggle_tbstyle() may have changed it.
            btn.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)

    def toggle_tbstyle(self, button):
        """Toggle the ToolButtonStyle of the given button between :data:`ToolButtonIconOnly` and :data:`ToolButtonTextBesideIcon`

        :param button: a tool button
        :type button: :class:`QtGui.QToolButton`
        :returns: None
        :rtype: None
        :raises: None
        """
        old = button.toolButtonStyle()
        if old == QtCore.Qt.ToolButtonIconOnly:
            new = QtCore.Qt.ToolButtonTextBesideIcon
        else:
            new = QtCore.Qt.ToolButtonIconOnly
        button.setToolButtonStyle(new)

    def set_menu(self, ):
        """Setup the menu that the menu_tb button uses

        :returns: None
        :rtype: None
        :raises: None
        """
        self.menu = QtGui.QMenu(self)
        actions = self.reftrack.get_additional_actions()
        # Keep references so the QActions are not garbage collected.
        self.actions = []
        for a in actions:
            if a.icon:
                qaction = QtGui.QAction(a.icon, a.name, self)
            else:
                qaction = QtGui.QAction(a.name, self)
            qaction.setCheckable(a.checkable)
            qaction.setChecked(a.checked)
            qaction.setEnabled(a.enabled)
            qaction.triggered.connect(a.action)
            self.actions.append(qaction)
            self.menu.addAction(qaction)
        self.menu_tb.setMenu(self.menu)

    def get_taskfileinfo_selection(self, ):
        """Return a taskfileinfo that the user chose from the available options

        :returns: the chosen taskfileinfo, or None if the dialog was cancelled
        :rtype: :class:`jukeboxcore.filesys.TaskFileInfo` | None
        :raises: None
        """
        sel = OptionSelector(self.reftrack)
        sel.exec_()
        return sel.selected

    def duplicate(self, ):
        """Duplicate the current reftrack

        :returns: None
        :rtype: None
        :raises: None
        """
        self.reftrack.duplicate()

    def delete(self, ):
        """Delete the current reftrack

        :returns: None
        :rtype: None
        :raises: None
        """
        self.reftrack.delete()

    def load(self):
        """Load the current reftrack

        :returns: None
        :rtype: None
        :raises: None
        """
        self.reftrack.load()

    def unload(self, ):
        """Unload the current reftrack

        :returns: None
        :rtype: None
        :raises: None
        """
        self.reftrack.unload()

    def reference(self, ):
        """Reference a file chosen by the user

        :returns: None
        :rtype: None
        :raises: None
        """
        tfi = self.get_taskfileinfo_selection()
        if tfi:
            self.reftrack.reference(tfi)

    def import_file(self, ):
        """Import a file chosen by the user

        :returns: None
        :rtype: None
        :raises: NotImplementedError
        """
        tfi = self.get_taskfileinfo_selection()
        if tfi:
            self.reftrack.import_file(tfi)

    def import_reference(self, ):
        """Import the reference of the current reftrack

        :returns: None
        :rtype: None
        :raises: None
        """
        self.reftrack.import_reference()

    def replace(self, ):
        """Replace the current reftrack with a file chosen by the user

        :returns: None
        :rtype: None
        :raises: None
        """
        tfi = self.get_taskfileinfo_selection()
        if tfi:
            self.reftrack.replace(tfi)
class ReftrackDelegate(WidgetDelegate):
    """A delegate for drawing a :class:`jukeboxcore.gui.reftrackitemdata.ReftrackItemData`

    Paints and edits reftrack rows using :class:`ReftrackWidget` instances.
    """

    def __init__(self, parent=None):
        """Initialize a new ReftrackDelegate

        :param parent: the parent object
        :type parent: :class:`QtCore.QObject` | None
        :raises: None
        """
        super(ReftrackDelegate, self).__init__(parent)

    def create_widget(self, parent=None):
        """Return a widget that should get painted by the delegate

        You might want to use this in :meth:`WidgetDelegate.createEditor`

        :returns: The created widget | None
        :rtype: QtGui.QWidget | None
        :raises: None
        """
        return ReftrackWidget(parent)

    def set_widget_index(self, index):
        """Set the index for the widget. The widget should retrieve data from the index and display it.

        You might want use the same function as for :meth:`WidgetDelegate.setEditorData`.

        :param index: the index to paint
        :type index: QtCore.QModelIndex
        :returns: None
        :rtype: None
        :raises: None
        """
        self.widget.set_index(index)

    def create_editor_widget(self, parent, option, index):
        """Return the editor to be used for editing the data item with the given index.

        Note that the index contains information about the model being used.
        The editor's parent widget is specified by parent, and the item options by option.

        :param parent: the parent widget
        :type parent: QtGui.QWidget
        :param option: the options for painting
        :type option: QtGui.QStyleOptionViewItem
        :param index: the index to paint
        :type index: QtCore.QModelIndex
        :returns: the editor widget
        :rtype: QtGui.QWidget
        :raises: None
        """
        # The editor is just another ReftrackWidget bound to the same index.
        return self.create_widget(parent)

    def setEditorData(self, editor, index):
        """Sets the contents of the given editor to the data for the item at the given index.

        Note that the index contains information about the model being used.

        :param editor: the editor widget
        :type editor: QtGui.QWidget
        :param index: the index to paint
        :type index: QtCore.QModelIndex
        :returns: None
        :rtype: None
        :raises: None
        """
        editor.set_index(index)
|
# Package version (PEP 440 pre-release: 0.1.0, beta 3).
__version__ = "0.1.0b3"
|
# Copyright (c) Niall Asher 2022
# disabling PyUnresolvedReferences for the import.
# noinspection PyUnresolvedReferences
from socialserver.util.test import (
create_comment_with_request,
create_user_with_request,
server_address,
test_db,
create_post_with_request,
)
from socialserver.constants import CommentFeedSortTypes, ErrorCodes
import requests
# TODO: Like sort testing will need to be implemented when comment likes are!
def test_get_comment_feed_empty(server_address, test_db):
    """A post without comments yields an empty feed that has reached its end."""
    post_id = create_post_with_request(test_db.access_token)
    payload = {
        "count": 10,
        "offset": 0,
        "sort": CommentFeedSortTypes.CREATION_TIME_DESCENDING.value,
        "post_id": post_id,
    }
    response = requests.get(
        f"{server_address}/api/v3/comments/feed",
        json=payload,
        headers={"Authorization": f"bearer {test_db.access_token}"},
    )
    assert response.status_code == 200
    body = response.json()
    assert len(body["comments"]) == 0
    assert body["meta"]["reached_end"] is True
def test_get_comment_feed(server_address, test_db):
    """Requesting 10 of 15 comments returns a full page and reached_end False."""
    post_id = create_post_with_request(test_db.access_token)
    total_comments = 15
    for _ in range(total_comments):
        create_comment_with_request(test_db.access_token, post_id)
    response = requests.get(
        f"{server_address}/api/v3/comments/feed",
        json={
            "post_id": post_id,
            "count": 10,
            "offset": 0,
            "sort": CommentFeedSortTypes.CREATION_TIME_DESCENDING.value,
        },
        headers={"Authorization": f"bearer {test_db.access_token}"},
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["meta"]["reached_end"] is False
    assert len(payload["comments"]) == 10
def test_get_comment_feed_check_total_count(server_address, test_db):
    """The feed metadata reports the total comment count, not just the page size."""
    post_id = create_post_with_request(test_db.access_token)
    total_comments = 15
    for _ in range(total_comments):
        create_comment_with_request(test_db.access_token, post_id)
    response = requests.get(
        f"{server_address}/api/v3/comments/feed",
        json={
            "post_id": post_id,
            "count": 10,
            "offset": 0,
            "sort": CommentFeedSortTypes.CREATION_TIME_DESCENDING.value,
        },
        headers={"Authorization": f"bearer {test_db.access_token}"},
    )
    assert response.status_code == 200
    assert response.json()["meta"]["comment_count"] == total_comments
def test_get_comment_feed_count_higher_than_comment_count(server_address, test_db):
    """Asking for more comments than exist returns them all and ends the feed."""
    post_id = create_post_with_request(test_db.access_token)
    total_comments = 15
    for _ in range(total_comments):
        create_comment_with_request(test_db.access_token, post_id)
    response = requests.get(
        f"{server_address}/api/v3/comments/feed",
        json={
            "post_id": post_id,
            "count": 20,
            "offset": 0,
            "sort": CommentFeedSortTypes.CREATION_TIME_DESCENDING.value,
        },
        headers={"Authorization": f"bearer {test_db.access_token}"},
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["meta"]["reached_end"] is True
    assert len(payload["comments"]) == 15
def test_get_comment_feed_invalid_post_id(server_address, test_db):
    """A feed request for a nonexistent post fails with 404 / POST_NOT_FOUND."""
    response = requests.get(
        f"{server_address}/api/v3/comments/feed",
        json={
            "post_id": 1337,
            "count": 10,
            "offset": 0,
            "sort": CommentFeedSortTypes.CREATION_TIME_DESCENDING.value,
        },
        headers={"Authorization": f"bearer {test_db.access_token}"},
    )
    assert response.status_code == 404
    assert response.json()["error"] == ErrorCodes.POST_NOT_FOUND.value
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Simple tool to convert Time Zones boundaries defined in multipolygon tz_world shapefile
(see http://efele.net/maps/tz/world/) to a SQLite database for quicker Location->Time Zone mapping.
Database contains a single table which may be merged to existing database.
"""
import argparse
import os
import sys
import sqlite3
import array
import shapefile
__author__ = 'Andrey Maslyuk'
__license__ = 'MIT'
__version__ = '1.0.0'
# Degrees are stored as int(degrees * COORD_ENCODER), i.e. ~1e-7 degree steps.
COORD_ENCODER = 10000000
# Sentinel bounds safely outside the valid +/-180 degree coordinate range.
COORD_MAX = 200 * COORD_ENCODER
COORD_MIN = -200 * COORD_ENCODER
# Non-coordinate marker used to separate polygons inside a region blob.
COORD_DELIMITER = COORD_MAX
def shapefile_to_sqlite(dbfilebase, sqlfile, verbose):
    """Convert the tz_world multipolygon shapefile into an indexed SQLite table.

    :param dbfilebase: base path of the shapefile (without extension)
    :param sqlfile: path of the SQLite database file to create or update
    :param verbose: when truthy, print progress messages
    :returns: 0 on success
    """
    if verbose:
        print('Opening tz_world...')
    db = shapefile.Reader(dbfilebase)
    if verbose:
        print('processing...')
    conn = sqlite3.connect(sqlfile)
    conn.execute('drop index if exists tztools_tz0_bounds;')
    conn.execute('drop table if exists tztools_tz0;')
    conn.commit()
    conn.execute('create table tztools_tz0(' +
                 'minlon integer, minlat integer, maxlon integer, maxlat integer,' +
                 'name text primary key, region blob);')
    conn.commit()
    # The database is a single table with data; can be safely merged into other DBs.
    # All coordinates in DB are stored as 32-bit ints (degrees * COORD_ENCODER; gives ~1/2" precision).
    # Each row is a complete TZ having name, bounding rectangle and regions.
    # Regions blob is an array of polygons' vertices in form <Longitude, Latitude> stored sequentially;
    # polygons are separated by a single divider value of COORD_DELIMITER (non-coordinate).
    for idx in range(db.numRecords):
        tzname = db.record(idx)[0]
        sh = db.shape(idx)
        if 5 == sh.shapeType:
            minlon = int(COORD_MAX)
            minlat = int(COORD_MAX)
            maxlon = int(COORD_MIN)
            maxlat = int(COORD_MIN)
            # Bug fix: typecode 'i' guarantees 32-bit elements as documented;
            # the previous 'l' is 64-bit on most 64-bit platforms, silently
            # doubling the blob element size.
            region = array.array('i')
            pos = len(sh.points) - 1
            for lim in reversed(sh.parts):
                for p in range(pos, lim, -1):
                    lon = int(sh.points[p][0] * COORD_ENCODER)
                    lat = int(sh.points[p][1] * COORD_ENCODER)
                    region.append(lon)
                    region.append(lat)
                    # Bug fix: min and max must be tested independently. With
                    # the previous if/elif, a point that lowered the min never
                    # updated the max (e.g. the first point), which could leave
                    # max* stuck at the COORD_MIN sentinel.
                    if lon < minlon:
                        minlon = lon
                    if lon > maxlon:
                        maxlon = lon
                    if lat < minlat:
                        minlat = lat
                    if lat > maxlat:
                        maxlat = lat
                pos = lim - 1
                region.append(int(COORD_DELIMITER))  # insert delimiter
            region.pop()  # remove last delimiter
            conn.execute('insert into tztools_tz0(minlon, minlat, maxlon, maxlat, name, region) values(?,?,?,?,?,?);',
                         [minlon, minlat, maxlon, maxlat, tzname, sqlite3.Binary(region.tobytes())])
        else:
            if verbose:
                print('Unknown shape type ' + str(sh.shapeType) + ' at ' + str(idx))
    conn.commit()
    conn.execute('create index tztools_tz0_bounds on tztools_tz0(minlon, minlat, maxlon, maxlat);')
    conn.commit()
    conn.close()
    if verbose:
        print('Done')
    return 0
def main():
    """Parse command-line arguments, validate paths and run the conversion."""
    # arguments definition
    argparser = argparse.ArgumentParser(description='Converts tz_world multipolygon shapefile ' +
                                        '(see http://efele.net/maps/tz/world/) to indexed SQLite.')
    argparser.add_argument('-db', metavar='directory',
                           help='tz_world database directory, defaults to "tz_world"',
                           default='tz_world')
    argparser.add_argument('-out', metavar='sqlite_file',
                           help='output SQLite DB file path/name; defaults to "tz_world_mp.sqlite" in current directory',
                           default='tz_world_mp.sqlite')
    argparser.add_argument('-m', action='store_true',
                           help='merge into existing SQLite DB; will re-create table "tz"')
    argparser.add_argument('-f', action='store_true',
                           help='overwrite existing SQLite DB')
    argparser.add_argument('-v', action='store_true',
                           help='verbose output')
    args = argparser.parse_args()
    # arguments checks
    if not os.path.isdir(args.db):
        print('The tz_world database directory does not exist: ' + args.db)
        sys.exit(1)
    dbfile = os.path.join(args.db, 'tz_world_mp')
    dbfiletest = dbfile + '.shp'
    if not os.path.isfile(dbfiletest):
        print('The tz_world database does not exist: ' + dbfiletest)
        sys.exit(1)
    outdirtest = os.path.dirname(args.out)
    # Bug fix: os.path.isdir() was called without its argument, raising
    # TypeError whenever -out contained a directory component.
    if len(outdirtest) > 0 and not os.path.isdir(outdirtest):
        print('The output directory does not exist: ' + outdirtest)
        sys.exit(1)
    if os.path.isfile(args.out):
        if args.f:
            os.remove(args.out)
        elif not args.m:
            print('The output file exists: ' + args.out)
            sys.exit(3)
    # processing
    try:
        ret = shapefile_to_sqlite(dbfile, args.out, args.v)
        if ret != 0:
            sys.exit(ret)
    except SystemExit:
        # Bug fix: the previous bare `except:` also caught the SystemExit
        # raised by sys.exit(ret) above and reported it as an error.
        raise
    except Exception:
        print('Unexpected error: ', sys.exc_info()[0])
        sys.exit(2)


if __name__ == '__main__':
    main()
|
from sqlalchemy import MetaData, Table, Column, UnicodeText
from sqlalchemy.sql.schema import CheckConstraint, ForeignKey
meta = MetaData()
def upgrade(migrate_engine):
    """Add the ``introduction`` text column to the ``service`` table."""
    meta.bind = migrate_engine
    service = Table("service", meta, autoload=True)
    introduction = Column("introduction", UnicodeText)
    introduction.create(service)
def downgrade(migrate_engine):
    """Drop the ``introduction`` column added by :func:`upgrade`.

    Bug fix: the column is named "introduction" (see upgrade), so the previous
    ``s.c.instroduction`` attribute lookup could never resolve and the
    downgrade always failed.
    """
    meta.bind = migrate_engine
    service = Table("service", meta, autoload=True)
    service.c.introduction.drop()
|
# -*- coding: utf-8 -*-
import os
from django.db import models
from django.contrib.auth.models import User
from django.db.utils import Error
from django.db.models.signals import post_delete
from django.dispatch import receiver
from django.utils.functional import cached_property
import json
# Create your models here.
def user_directory_path(instance, filename):
    """Build the per-user upload path for a profile photo, keeping the extension."""
    _, extension = os.path.splitext(filename)
    owner = instance.user
    return 'images/users/photos_profile/{0}_{1}{2}'.format(owner.username, owner.id, extension)
class CoreUser(models.Model):
    """Extended profile for a Django auth User (document id, photo, location)."""

    USER_TYPE_LOGIN = (
        ('MN', 'Manual'),
        ('FB', 'Facebook'),
        ('IG', 'Instagram'),
        ('GH', 'GitHub'),
        ('LD', 'LinkedIn'),
    )
    # Brazilian federative units. Label fixes: DF is "Distrito Federal" (was
    # truncated to "Distrito") and GO is "Goiás" (Goiânia is its capital
    # city, not the state); also added the missing "BA" (Bahia) entry.
    # Stored values (the 2-letter codes) are unchanged, so existing rows are
    # unaffected.
    UF_CHOICES = [
        ("AC", "Acre"), ("AL", "Alagoas"), ("AM", "Amazonas"), ("AP", "Amapá"),
        ("BA", "Bahia"), ("CE", "Ceará"), ("DF", "Distrito Federal"),
        ("ES", "Espírito Santo"), ("GO", "Goiás"), ("MA", "Maranhão"),
        ("MG", "Minas Gerais"), ("MS", "Mato Grosso do Sul"), ("MT", "Mato Grosso"),
        ("PA", "Pará"), ("PB", "Paraíba"), ("PE", "Pernambuco"), ("PI", "Piauí"),
        ("PR", "Paraná"), ("RJ", "Rio de Janeiro"), ("RN", "Rio Grande do Norte"),
        ("RO", "Rondônia"), ("RR", "Roraima"), ("RS", "Rio Grande do Sul"),
        ("SC", "Santa Catarina"), ("SE", "Sergipe"), ("SP", "São Paulo"),
        ("TO", "Tocantins"),
    ]
    pk_core_user = models.CharField(
        max_length=20, primary_key=True, verbose_name='Doc. Identif.'
    )
    user = models.ForeignKey(User, on_delete=models.PROTECT)
    user_photo = models.ImageField(
        upload_to=user_directory_path, default='images/user.png', blank=True
    )
    user_uf = models.CharField(
        max_length=2, choices=UF_CHOICES, verbose_name='UF'
    )
    user_city = models.CharField(max_length=100, verbose_name='Cidade')
    type_login = models.CharField(
        max_length=2, choices=USER_TYPE_LOGIN, verbose_name='Tipo Login'
    )

    @cached_property
    def username(self):
        # Convenience proxy to the related auth user's username.
        return self.user.username

    @cached_property
    def password(self):
        # Convenience proxy to the related auth user's (hashed) password.
        return self.user.password

    class Meta:
        db_table = 'core_user'
        verbose_name = "Usuário"

    def save(self, *args, **kwargs):
        """Save the profile, removing a replaced photo file from storage first.

        Bug fix: the lookup previously used the nonexistent field kwarg
        ``pk_coreuser`` (the field is ``pk_core_user``) and never caught
        ``CoreUser.DoesNotExist``, so the cleanup branch could not work.
        """
        try:
            obj = CoreUser.objects.get(pk_core_user=self.pk_core_user)
            # Delete the old photo when it is being replaced, but never the
            # shared default image.
            if obj.user_photo != self.user_photo and obj.user_photo != 'images/user.png':
                obj.user_photo.delete(save=False)
        except (CoreUser.DoesNotExist, Error):
            # New instance (nothing to clean up) or a DB problem: photo
            # cleanup is best-effort only, the save itself proceeds.
            pass
        super(CoreUser, self).save(*args, **kwargs)

    def __unicode__(self):
        return u'%s' % self.user

    def __str__(self):
        return u'%s' % self.user
class TypeDevice(models.Model):
    """A device model (brand/model) with its login-parameter name mapping."""

    # Default mapping from the internal ga_*/link-* parameter names to the
    # names used by the device -- presumably the query-string keys of its
    # captive-portal login page (TODO confirm against the portal views).
    # Stored as JSON text and used as the default for field_map_device_params.
    FIELD_MAP_DEVICE_PARAMS = json.dumps({
        "ga_ssid": "identity",
        "ga_ap_mac": 'mac do ap',
        "ga_nas_id": "chap-id",
        "ga_srvr": "server-address",
        "ga_cmac": "mac",
        "ga_cip": "ip",
        "ga_loc": "",
        "ga_Qv": "chap-challenge",
        "link-login": "link-login",
        "link-orig": "link-orig",
        "link-login-only": "link-login-only",
        "link-orig-esc": "link-orig-esc",
    })
    pk_types_device = models.AutoField(primary_key=True, verbose_name='Código')
    descr_device = models.CharField(max_length=100, verbose_name='Descrição')
    supply_device = models.CharField(max_length=50, verbose_name='Marca')
    model_device = models.CharField(max_length=50, verbose_name='Modelo')
    flag_approve = models.BooleanField(default=False, verbose_name='Homologado')
    field_map_device_params = models.TextField(
        verbose_name='Parâmetros', default=FIELD_MAP_DEVICE_PARAMS
    )
    insert_date = models.DateTimeField(auto_now_add=True, verbose_name='Data de Inserção')
    update_date = models.DateTimeField(null=True, blank=True, verbose_name='Data de Atualização')

    class Meta:
        db_table = 'core_types_device'
        verbose_name = 'Tipos de Dispositivos'
class ServerDevices(models.Model):
    """An authenticator device (AP/NAS) identified by its MAC address."""

    pk_devices = models.CharField(
        max_length=15, primary_key=True, verbose_name='Mac do Dispositivo'
    )
    fk_types_device = models.ForeignKey(
        TypeDevice, on_delete=models.PROTECT, verbose_name='Tipo'
    )
    # Bug fix: this field was declared twice with identical arguments; the
    # redundant duplicate has been removed (the later binding won anyway).
    descr_device = models.CharField(max_length=100, verbose_name='Descrição')
    ga_nas_id = models.CharField(max_length=50, verbose_name='Ident. Diálogo')
    ga_srvr = models.CharField(max_length=64, verbose_name='Ip do AP')
    ga_loc = models.CharField(max_length=64, verbose_name='Localização')
    insert_date = models.DateTimeField(
        auto_now_add=True, verbose_name='Data de Inserção'
    )
    update_date = models.DateTimeField(
        null=True, blank=True, verbose_name='Data de Atualização'
    )

    class Meta:
        db_table = 'core_server_devices'
        verbose_name = 'Autenticadores'
class UserDevices(models.Model):
    """A client device (by MAC) seen on an authenticator.

    The ga_* and link_* fields mirror the parameter names mapped in
    TypeDevice.FIELD_MAP_DEVICE_PARAMS.
    """

    pk_user_devices = models.AutoField(
        primary_key=True, verbose_name='Código Dispositivo'
    )
    fk_server_devices = models.ForeignKey(
        ServerDevices, on_delete=models.PROTECT
    )
    mac_address = models.CharField(
        max_length=15, unique=True, verbose_name='Endereço Mac'
    )
    # Nullable: a device row can exist before being associated with a user.
    fk_user = models.ForeignKey(
        CoreUser,
        null=True,
        blank=True,
        on_delete=models.PROTECT,
        verbose_name='Usuário'
    )
    ga_ssid = models.CharField(max_length=64, verbose_name='SSID conexão')
    ga_cmac = models.CharField(max_length=15, verbose_name='Mac Cliente')
    ga_cip = models.CharField(max_length=15, verbose_name='IP Cliente')
    ga_Qv = models.CharField(max_length=128, verbose_name='autenticador')
    link_login = models.CharField(max_length=128, verbose_name='Link Login')
    link_orig = models.CharField(max_length=128, verbose_name='Link Original')
    link_login_only = models.CharField(
        max_length=128, verbose_name='Link Login Author'
    )
    link_orig_esc = models.CharField(
        max_length=128, verbose_name='Link de Redirecionamento'
    )
    insert_date = models.DateTimeField(
        auto_now_add=True, verbose_name='Data de Inserção'
    )
    update_date = models.DateTimeField(
        null=True, blank=True, verbose_name='Data de Atualização'
    )

    class Meta:
        db_table = 'core_user_devices'
        verbose_name = 'Dispositivos'
class LogUserDevice(models.Model):
    """Log row tying a UserDevices entry to a creation timestamp."""

    pk_log_users_devices = models.BigAutoField(
        primary_key=True, verbose_name='Código'
    )
    fk_user_devices = models.ForeignKey(
        UserDevices, on_delete=models.PROTECT, verbose_name='Usuário'
    )
    # Set once on creation; rows are never updated afterwards (no update field).
    insert_date = models.DateTimeField(
        auto_now_add=True, verbose_name='Data de Inserção'
    )
@receiver(post_delete, sender=CoreUser)
def post_delete_handler(sender, instance, **kwargs):
    """Remove the user's photo file from storage when a CoreUser is deleted."""
    # Never delete the shared default image 'user.png'.
    if instance.user_photo == 'images/user.png':
        return
    instance.user_photo.delete(False)
|
try:
from queue import Queue
except ImportError:
from Queue import Queue
from mirobot.socket_handler import SocketHandler
import time
import string
import random
import sys
import json
try:
import urllib.request as request
except ImportError:
import urllib2 as request
_sentinel = object()
class Mirobot:
    """Client for a Mirobot robot speaking its JSON message protocol.

    Messages are pushed to a SocketHandler thread through a send queue and
    replies are read back from a receive queue, matched up by message id.
    Movement/drawing calls block until the robot reports completion.
    """

    def __init__(self, address = None, debug = False):
        # Initialisation for the id field: a random 4-character nonce plus a
        # rolling counter (see generate_id) makes ids unique per client.
        self.nonce = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(4))
        self.n = 0
        self.debug = debug
        # callbacks
        self.__on_error = None
        self.__on_collide = None
        self.__on_follow = None
        if address:
            self.connect(address)

    def connect(self, address):
        """Start the socket handler for *address* and query the robot version."""
        # Set up the socket handling
        self.__send_q = Queue()
        self.recv_q = Queue()
        self.socket = SocketHandler(address, self.__send_q, self.recv_q, debug=self.debug, sentinel = _sentinel)
        self.socket.start()
        # get the version once connected
        self.version = self.__send('version')

    def connectMenu(self, devices):
        """Prompt the user to pick one of *devices*; return the 1-based choice string."""
        print("Select the Mirobot to connect to:")
        for i, device in enumerate(devices):
            print(" %i: %s" % (i+1, device['name']))
        # raw_input only exists on Python 2; fall back to input() on Python 3.
        try:
            choice = raw_input("Select a number:")
        except:
            choice = input("Select a number: ")
        return choice

    def autoConnect(self, id = None, interactive = False):
        """Discover Mirobots via the discovery server and connect to one.

        :param id: when given, connect to the device with this exact name
        :param interactive: when True, let the user choose from a menu
        :raises Exception: when discovery fails or no unique device can be chosen
        """
        try:
            res = request.urlopen("http://local.mirobot.io/devices.json").read()
        except:
            raise Exception("Could not connect to discovery server")
        # Python 3 urlopen returns bytes (decode first); Python 2 returns str.
        try:
            devices = json.loads(str(res, 'utf-8'))
        except TypeError:
            devices = json.loads(res)
        print(devices)
        if interactive:
            choice = self.connectMenu(devices['devices'])
            print("Connecting to: %s" % devices['devices'][int(choice)-1]['name'])
            self.connect(devices['devices'][int(choice)-1]['address'])
        else:
            if id:
                filtered = [item for item in devices['devices'] if item['name'] == id]
                if len(filtered) == 0:
                    raise Exception("No Mirobots found with id: %s" % id)
                elif len(filtered) == 1:
                    # Connect to the only device we've found
                    self.connect(filtered[0]['address'])
                else:
                    raise Exception("Multiple Mirobots found with id: %s" % id)
            else:
                if len(devices['devices']) == 0:
                    raise Exception("No Mirobots found")
                elif len(devices['devices']) == 1:
                    # Connect to the only device we've found
                    self.connect(devices['devices'][0]['address'])
                else:
                    raise Exception("Too many Mirobots found to auto connect without specifying an ID")

    def errorNotify(self, on_error):
        """Install *on_error*; it is called instead of raising when a send fails."""
        self.__on_error = on_error

    def collideNotify(self, on_collide):
        """Install *on_collide* and enable/disable collision notifications."""
        enabled = bool(on_collide)
        self.__on_collide = on_collide
        self.__send('collideNotify', ('false','true')[enabled])

    def followNotify(self, on_follow):
        """Install *on_follow* and enable/disable line-follow notifications."""
        enabled = bool(on_follow)
        self.__on_follow = on_follow
        self.__send('followNotify', ('false','true')[enabled])

    def ping(self):
        return self.__send('ping')

    def uptime(self):
        return self.__send('uptime')

    def forward(self, distance):
        # Timeout scales with the argument -- presumably ~20 units per second;
        # TODO confirm against firmware speed.
        return self.__send('forward', distance, distance/20)

    def back(self, distance):
        return self.__send('back', distance, distance/20)

    def left(self, degrees):
        return self.__send('left', degrees, degrees/20)

    def right(self, degrees):
        return self.__send('right', degrees, degrees/20)

    def penup(self):
        return self.__send('penup')

    def pendown(self):
        return self.__send('pendown')

    def beep(self, milliseconds = 500):
        return self.__send('beep', milliseconds, milliseconds / 500)

    def collideState(self):
        return self.__send('collideState')

    def followState(self):
        return self.__send('followState')

    def disconnect(self):
        # Posting the sentinel tells the socket handler thread to shut down.
        self.__send_q.put(_sentinel)

    def __send(self, cmd, arg = None, timeout = 1):
        """Send *cmd* (with optional *arg*) and wait for its completion.

        Errors are routed to the on_error callback when one is installed,
        otherwise re-raised to the caller.
        """
        # Assemble the message
        msg = {'cmd': cmd}
        if (arg is not None):
            msg['arg'] = str(arg)
        # Send the message and handle exceptions
        try:
            return self.__send_or_raise(msg, timeout)
        except Exception as x:
            if not self.__on_error:
                raise
            return self.__on_error(x, msg, timeout, self)

    def __send_or_raise(self, msg, timeout):
        """Queue *msg* and pump the receive queue until it completes.

        Protocol (as implemented below): the robot first replies with status
        'accepted' for our id, then -- possibly interleaved with unsolicited
        'collide'/'follow' notifications -- with status 'complete' carrying
        the result.
        """
        msg_id = msg['id'] = self.generate_id()
        self.__send_q.put(msg)
        deadline = timeout + time.time()
        accepted = False
        while True:
            try:
                # NOTE(review): max(1, ...) waits at least one more second even
                # when the deadline has already passed -- max(0, ...) may have
                # been intended; confirm before changing.
                timeout = max(1, deadline - time.time())
                incoming = self.recv_q.get(block = True, timeout = timeout)
            except KeyboardInterrupt as e:
                self.disconnect()
                raise e
            except: # .get raises "Empty"
                if (accepted):
                    raise IOError("Mirobot timed out awaiting completion of %r" % (msg,))
                raise IOError("Mirobot timed out awaiting acceptance of %r" % (msg,))
            try:
                rx_id = incoming.get('id','???')
                if rx_id != msg_id:
                    # Unsolicited notifications arrive with fixed ids
                    # instead of the one we generated.
                    if (rx_id == 'collide'):
                        self.__collide(incoming)
                        continue
                    if (rx_id == 'follow'):
                        self.__follow(incoming)
                        continue
                    raise IOError("Received message ID (%s) does not match expected (%s)" % (rx_id, msg_id))
                rx_status = incoming.get('status','???')
                if rx_status == 'accepted':
                    accepted = True
                elif rx_status == 'complete':
                    return incoming.get('msg',None)
                elif rx_status == 'notify':
                    pass
                else:
                    raise IOError("Received message status (%s) unexpected" % (rx_status,))
            finally:
                self.recv_q.task_done()

    def __collide(self, msg):
        # Dispatch a collision notification as (left, right) booleans.
        if self.__on_collide:
            left = msg['msg'] in ('both','left')
            right = msg['msg'] in ('both','right')
            self.__on_collide(left, right, msg, self)

    def __follow(self, msg):
        # Dispatch a line-follow notification with the integer sensor state.
        if self.__on_follow:
            state = int(msg['msg'])
            self.__on_follow(state, msg, self)

    def generate_id(self):
        """Return the next message id: nonce + 4-hex-digit rolling counter."""
        self.n = (self.n + 1) % 0x10000
        return '%s%04x' % (self.nonce, self.n)
|
import json
import pytest
from eth_account.messages import encode_defunct
import ape
from ape import convert
from ape.exceptions import AccountsError, ContractLogicError, SignatureError, TransactionError
ALIAS = "__FUNCTIONAL_TESTS_ALIAS__"
@pytest.fixture(autouse=True, scope="module")
def connected(eth_tester_provider):
    # Requesting eth_tester_provider is enough to have a provider available
    # for every test in this module; nothing else to set up or tear down.
    yield
@pytest.fixture
def temp_ape_account(sender, keyparams, temp_accounts_path):
    """Yield a throw-away keyfile account, funded before and removed after the test."""
    keyfile = temp_accounts_path / f"{ALIAS}.json"
    if keyfile.exists():
        # Corrupted from a previous test
        keyfile.unlink()
    keyfile.write_text(json.dumps(keyparams))
    account = ape.accounts.load(ALIAS)
    sender.transfer(account, "1 ETH")  # Auto-fund this account
    yield account
    if keyfile.exists():
        keyfile.unlink()
def test_sign_message(test_accounts):
    """A signature produced by an account verifies against the same message."""
    account = test_accounts[2]
    msg = encode_defunct(text="Hello Apes!")
    sig = account.sign_message(msg)
    assert account.check_signature(msg, sig)
def test_sign_message_with_prompts(runner, temp_ape_account):
    """Interactive signing: approving with the passphrase signs, refusing returns None."""
    # "y\na\ny": yes sign, password, yes keep unlocked
    with runner.isolation(input="y\na\ny"):
        msg = encode_defunct(text="Hello Apes!")
        sig = temp_ape_account.sign_message(msg)
        assert temp_ape_account.check_signature(msg, sig)
    # "n": don't sign
    with runner.isolation(input="n\n"):
        assert temp_ape_account.sign_message(msg) is None
def test_transfer(sender, receiver):
    """Transferring 1 gwei increases the receiver's balance by exactly that amount."""
    balance_before = receiver.balance
    sender.transfer(receiver, "1 gwei")
    assert receiver.balance == balance_before + convert("1 gwei", int)
def test_transfer_with_prompts(runner, receiver, temp_ape_account):
    """Transfers prompt for signing; declining raises SignatureError.

    Consistency fix: pass ``input=`` by keyword like every other
    ``runner.isolation(...)`` call in this module.
    """
    # "y\na\ny": yes sign, password, yes keep unlocked
    with runner.isolation(input="y\na\ny"):
        receipt = temp_ape_account.transfer(receiver, "1 gwei")
        assert receipt.receiver == receiver
    # "n": don't sign
    with runner.isolation(input="n\n"):
        with pytest.raises(SignatureError):
            temp_ape_account.transfer(receiver, "1 gwei")
def test_transfer_using_type_0(sender, receiver):
    """Static-fee (type 0) transfers credit the receiver just like dynamic-fee ones."""
    balance_before = receiver.balance
    sender.transfer(receiver, "1 gwei", type=0)
    assert receiver.balance == balance_before + convert("1 gwei", int)
def test_deploy(owner, contract_container):
    """Deploying a container yields an instance with a concrete address."""
    instance = owner.deploy(contract_container)
    assert instance.address
def test_contract_calls(owner, contract_instance):
    """A mutating call from the owner is visible in the following view call."""
    contract_instance.set_number(2, sender=owner)
    assert contract_instance.my_number() == 2
def test_contract_revert(sender, contract_instance):
    """Unauthorized mutations revert with the contract's error message."""
    # 'sender' is not the owner so it will revert (with a message)
    with pytest.raises(ContractLogicError) as exc_info:
        contract_instance.set_number(5, sender=sender)
    assert str(exc_info.value) == "!authorized"
def test_contract_revert_no_message(owner, contract_instance):
    """An empty revert surfaces ape's default failure message."""
    # The Contract raises empty revert when setting number to 5.
    with pytest.raises(ContractLogicError) as exc_info:
        contract_instance.set_number(5, sender=owner)
    assert str(exc_info.value) == "Transaction failed."  # Default message
def test_send_transaction_with_bad_nonce(sender, receiver):
    """Re-using an already-consumed nonce is rejected before publishing."""
    # Bump the nonce so we can set one that is too low.
    sender.transfer(receiver, "1 gwei", type=0)
    with pytest.raises(AccountsError) as exc_info:
        sender.transfer(receiver, "1 gwei", type=0, nonce=0)
    assert str(exc_info.value) == "Invalid nonce, will not publish."
def test_send_transaction_without_enough_funds(sender, receiver):
    """Transfers exceeding the sender's balance fail with a clear error."""
    with pytest.raises(TransactionError) as exc_info:
        sender.transfer(receiver, "10000000000000 ETH")
    assert "Sender does not have enough balance to cover" in str(exc_info.value)
def test_send_transaction_sets_defaults(sender, receiver):
    """Passing None for gas limit and confirmations falls back to defaults."""
    receipt = sender.transfer(receiver, "1 GWEI", gas_limit=None, required_confirmations=None)
    assert receipt.gas_limit > 0
    assert receipt.required_confirmations == 0
def test_accounts_splice_access(test_accounts):
a, b = test_accounts[:2]
assert a == test_accounts[0]
assert b == test_accounts[1]
c = test_accounts[-1]
assert c == test_accounts[len(test_accounts) - 1]
assert len(test_accounts[::2]) == len(test_accounts) / 2
def test_accounts_address_access(test_accounts, accounts):
assert accounts[test_accounts[0].address] == test_accounts[0]
def test_accounts_contains(accounts, test_accounts):
assert test_accounts[0].address in accounts
def test_autosign_messages(temp_ape_account):
    """With autosign enabled, messages are signed without any prompting."""
    temp_ape_account.set_autosign(True, passphrase="a")
    msg = encode_defunct(text="Hello Apes!")
    sig = temp_ape_account.sign_message(msg)
    assert temp_ape_account.check_signature(msg, sig)
def test_autosign_transactions(temp_ape_account, receiver):
    """With autosign enabled, transfers go through without any prompting."""
    temp_ape_account.set_autosign(True, passphrase="a")
    assert temp_ape_account.transfer(receiver, "1 gwei")
def test_impersonate_not_implemented(accounts):
    """Looking up an unknown address raises IndexError when impersonation is unsupported."""
    address = "0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045"
    with pytest.raises(IndexError) as exc_info:
        _ = accounts[address]
    expected = (
        "Your provider does not support impersonating accounts:\n"
        f"No account with address '{address}'."
    )
    assert expected in str(exc_info.value)
def test_unlock_with_passphrase_and_sign_message(runner, temp_ape_account):
    """Unlocking with a passphrase still requires a per-signature confirmation."""
    temp_ape_account.unlock(passphrase="a")
    msg = encode_defunct(text="Hello Apes!")
    # y: yes, sign (note: unlocking makes the key available but is not the same as autosign).
    with runner.isolation(input="y\n"):
        sig = temp_ape_account.sign_message(msg)
        assert temp_ape_account.check_signature(msg, sig)
def test_unlock_from_prompt_and_sign_message(runner, temp_ape_account):
    """Unlock via the interactive prompt, then sign with confirmation."""
    # a = password
    with runner.isolation(input="a\n"):
        temp_ape_account.unlock()
    msg = encode_defunct(text="Hello Apes!")
    # yes, sign the message
    with runner.isolation(input="y\n"):
        sig = temp_ape_account.sign_message(msg)
        assert temp_ape_account.check_signature(msg, sig)
def test_unlock_with_passphrase_and_sign_transaction(runner, temp_ape_account, receiver):
    """Unlocking with a passphrase still requires confirming each transaction."""
    temp_ape_account.unlock(passphrase="a")
    # y: yes, sign (note: unlocking makes the key available but is not the same as autosign).
    with runner.isolation(input="y\n"):
        receipt = temp_ape_account.transfer(receiver, "1 gwei")
        assert receipt.receiver == receiver
def test_unlock_from_prompt_and_sign_transaction(runner, temp_ape_account, receiver):
    """Unlock via the interactive prompt, then transfer with confirmation."""
    # a = password
    with runner.isolation(input="a\n"):
        temp_ape_account.unlock()
    # yes, sign the transaction
    with runner.isolation(input="y\n"):
        receipt = temp_ape_account.transfer(receiver, "1 gwei")
        assert receipt.receiver == receiver
|
from PIL import Image
import numpy as np
import os
# Directory layout used by the simple image helpers below.
INPUT_DIR_IMAGE = "images"
OUTPUT_DIR_IMAGE = os.path.join("images", "output")


def in_path(filename):
    """Return *filename* joined onto the input image directory."""
    return os.path.join(INPUT_DIR_IMAGE, filename)
def getImage(filename):
    """Open and return the image stored under the input directory."""
    return Image.open(in_path(filename))
def saveImage(file, filename):
    """Save *file* (a PIL image) as *filename* inside the output directory."""
    # Idiom fix: the local was called `dir`, shadowing the builtin of that name.
    out_path = os.path.join(OUTPUT_DIR_IMAGE, filename)
    file.save(out_path)
def showImage(image):
    """Display *image* using its viewer (drops the pointless local alias)."""
    image.show()
def showVertical(image1, image2):
    """Stack two images vertically, display the result and return it."""
    stacked = Image.fromarray(np.vstack((np.array(image1), np.array(image2))))
    stacked.show()
    return stacked
def showHorizontal(image1, image2):
    """Concatenate two images side by side, display the result and return it."""
    # Local renamed: it was misleadingly called imageVertical here.
    combined = Image.fromarray(np.hstack((np.array(image1), np.array(image2))))
    combined.show()
    return combined
|
import os
import pprint
import sys
from logging import Filter, Handler, Logger
from typing import Iterable
from slack import WebClient
from slack.errors import SlackApiError
class SlackHandler(Handler):
    """Logging handler that posts formatted records to a Slack channel."""

    def __init__(self, token, channel_id):
        Handler.__init__(self)
        self.client = WebClient(token)
        self.channel_id = channel_id

    def emit(self, record):
        # Standard Handler hook: format the record and forward it to Slack.
        log_entry = self.format(record)
        self.send_message(log_entry)

    def send_message(self, sent_str):
        """Post *sent_str* as a markdown section block; never raise on API errors."""
        try:
            self.client.chat_postMessage(
                channel=self.channel_id,
                blocks=[{"type": "section", "text": {"type": "mrkdwn", "text": sent_str}}],
            )
        except SlackApiError as e:
            # A SlackApiError means "ok" was False and e.response["error"]
            # holds a code such as 'invalid_auth' or 'channel_not_found'.
            # Fix: the previous bare `assert` checks are removed -- asserts are
            # stripped under `python -O`, and a logging handler's send path
            # must not raise secondary exceptions.
            print(f"Got an error: {e.response['error']}")
def pretty(logger: Logger, to_log: object) -> None:
    """Log *to_log* pretty-printed at DEBUG level, one logger call per line.

    Because the object is spread over several records, avoid this in
    production setups that export one log event per line (e.g. logstash).

    Arguments:
        logger {Logger} -- the logger to use
        to_log {object} -- object to be pretty printed
    """
    # https://stackoverflow.com/a/21024454
    formatted = pprint.pformat(to_log)
    for chunk in formatted.split("\n"):
        logger.debug(chunk)
class PackagePathFilter(Filter):
    """Logging filter that adds two record helpers:

    - relativepath: the module path relative to the matching sys.path entry,
      which makes path:lineno clickable from a terminal into an IDE.
    - relative_path_and_lineno: `relativepath` plus the line number, handy
      for fixed-width formatting.

    Based heavily on https://stackoverflow.com/a/52582536/15200392
    """

    def filter(self, record):
        # Default to None so format strings never hit an AttributeError.
        record.relativepath = None
        record.relative_path_and_lineno = None
        # Longest prefixes first so the most specific package root wins.
        search_paths: Iterable[str] = sorted(
            (os.path.abspath(entry) for entry in sys.path), key=len, reverse=True
        )
        for base in search_paths:
            prefix = base if base.endswith(os.sep) else base + os.sep
            if record.pathname.startswith(prefix):
                record.relativepath = os.path.relpath(record.pathname, prefix)
                record.relative_path_and_lineno = f"{record.relativepath}:{record.lineno}"
                break
        return True
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
wlanpi_webui.app
~~~~~~~~~~~~~~~~
the main flask app
"""
import logging
from flask import Flask, abort, redirect, request, send_from_directory
from wlanpi_webui.config import Config
def create_app(config_class=Config):
    """Application factory: build, configure and return the Flask app.

    :param config_class: configuration object passed to ``app.config.from_object``
    :returns: the configured Flask application
    """
    app = Flask(__name__)
    app.config.from_object(config_class)
    app.logger.debug("app.py create_app reached")
    # Blueprints are imported inside the factory -- presumably to avoid
    # circular imports between this module and the blueprint modules.
    app.logger.debug("registering errors blueprint")
    from wlanpi_webui.errors import bp as errors_bp
    app.register_blueprint(errors_bp)
    app.logger.debug("errors blueprint registered")
    app.logger.debug("registering speedtest blueprint")
    from wlanpi_webui.speedtest import bp as speedtest_bp
    app.register_blueprint(speedtest_bp)
    app.logger.debug("speedtest blueprint registered")
    app.logger.debug("registering profiler blueprint")
    from wlanpi_webui.profiler import bp as profiler_bp
    app.register_blueprint(profiler_bp)
    app.logger.debug("profiler blueprint registered")
    app.logger.debug("registering network blueprint")
    from wlanpi_webui.network import bp as network_bp
    app.register_blueprint(network_bp)
    app.logger.debug("network blueprint registered")

    @app.route("/admin")
    def admin():
        # Redirect to the Cockpit admin UI on the same host, port 9090.
        COCKPIT_PORT = "9090"
        base = request.host.split(":")[0]
        return redirect(f"http://{base}:{COCKPIT_PORT}")

    @app.route("/static/img/<path:filename>")
    def img(filename):
        # Serve images from the static folder; a missing file becomes a
        # clean 404 instead of an unhandled FileNotFoundError.
        try:
            return send_from_directory(f"{app.root_path}/static/img/", filename)
        except FileNotFoundError:
            abort(404)

    if not app.debug and not app.testing:
        if app.config["LOG_TO_STDOUT"]:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)
        app.logger.setLevel(logging.INFO)
        app.logger.info("wlanpi_webui startup")
    return app
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 14 10:53:41 2020
@author: Alejandro Villada Balbuena
"""
#%%
from __future__ import division, unicode_literals, print_function # for compatibility with Python 2 and 3
from pims import ND2Reader_SDK
from tifffile import imsave
import os
# --- User parameters -------------------------------------------------------
workdir = '/home/user/folder/'
filename = 'test'
frames = ND2Reader_SDK(workdir + filename + '.nd2')
#%%
# Export every frame of the ND2 stack as an individual TIFF file.
n = len(frames)
outdir = os.path.join(workdir, 'sep')
# Bug fix: exist_ok avoids the FileExistsError that os.makedirs(outdir)
# raised when the script was re-run on the same folder.
os.makedirs(outdir, exist_ok=True)
for x in range(n):
    # Zero-padded frame index so the files sort in acquisition order.
    imsave(os.path.join(outdir, filename + '_T' + str(x).zfill(6) + '.tif'), frames[x])
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# justice-platform-service (4.10.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ....core import StrEnum
class NotificationSourceEnum(StrEnum):
    # Payment backends that can originate a notification.
    WALLET = "WALLET"
    XSOLLA = "XSOLLA"
    ADYEN = "ADYEN"
    STRIPE = "STRIPE"
    CHECKOUT = "CHECKOUT"
    ALIPAY = "ALIPAY"
    WXPAY = "WXPAY"
    PAYPAL = "PAYPAL"
class StatusEnum(StrEnum):
    # Processing outcome of a payment notification.
    PROCESSED = "PROCESSED"
    ERROR = "ERROR"
    WARN = "WARN"
    IGNORED = "IGNORED"
class PaymentNotificationInfo(Model):
    """Payment notification info (PaymentNotificationInfo)

    Properties:
        created_at: (createdAt) REQUIRED str

        id_: (id) REQUIRED str

        namespace: (namespace) REQUIRED str

        notification: (notification) REQUIRED Dict[str, Any]

        notification_source: (notificationSource) REQUIRED Union[str, NotificationSourceEnum]

        notification_type: (notificationType) REQUIRED str

        payment_order_no: (paymentOrderNo) REQUIRED str

        status: (status) REQUIRED Union[str, StatusEnum]

        updated_at: (updatedAt) REQUIRED str

        external_id: (externalId) OPTIONAL str

        status_reason: (statusReason) OPTIONAL str
    """

    # region fields

    created_at: str  # REQUIRED
    id_: str  # REQUIRED
    namespace: str  # REQUIRED
    notification: Dict[str, Any]  # REQUIRED
    notification_source: Union[str, NotificationSourceEnum]  # REQUIRED
    notification_type: str  # REQUIRED
    payment_order_no: str  # REQUIRED
    status: Union[str, StatusEnum]  # REQUIRED
    updated_at: str  # REQUIRED
    external_id: str  # OPTIONAL
    status_reason: str  # OPTIONAL

    # endregion fields

    # region with_x methods

    def with_created_at(self, value: str) -> PaymentNotificationInfo:
        self.created_at = value
        return self

    def with_id(self, value: str) -> PaymentNotificationInfo:
        self.id_ = value
        return self

    def with_namespace(self, value: str) -> PaymentNotificationInfo:
        self.namespace = value
        return self

    def with_notification(self, value: Dict[str, Any]) -> PaymentNotificationInfo:
        self.notification = value
        return self

    def with_notification_source(self, value: Union[str, NotificationSourceEnum]) -> PaymentNotificationInfo:
        self.notification_source = value
        return self

    def with_notification_type(self, value: str) -> PaymentNotificationInfo:
        self.notification_type = value
        return self

    def with_payment_order_no(self, value: str) -> PaymentNotificationInfo:
        self.payment_order_no = value
        return self

    def with_status(self, value: Union[str, StatusEnum]) -> PaymentNotificationInfo:
        self.status = value
        return self

    def with_updated_at(self, value: str) -> PaymentNotificationInfo:
        self.updated_at = value
        return self

    def with_external_id(self, value: str) -> PaymentNotificationInfo:
        self.external_id = value
        return self

    def with_status_reason(self, value: str) -> PaymentNotificationInfo:
        self.status_reason = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        """Serialize to a camelCase dict; missing fields are emitted as empty values only when include_empty is True."""
        result: dict = {}
        if hasattr(self, "created_at"):
            result["createdAt"] = str(self.created_at)
        elif include_empty:
            result["createdAt"] = ""
        if hasattr(self, "id_"):
            result["id"] = str(self.id_)
        elif include_empty:
            result["id"] = ""
        if hasattr(self, "namespace"):
            result["namespace"] = str(self.namespace)
        elif include_empty:
            result["namespace"] = ""
        if hasattr(self, "notification"):
            result["notification"] = {str(k0): v0 for k0, v0 in self.notification.items()}
        elif include_empty:
            result["notification"] = {}
        if hasattr(self, "notification_source"):
            result["notificationSource"] = str(self.notification_source)
        elif include_empty:
            # BUG FIX: was `Union[str, NotificationSourceEnum]()` — typing.Union
            # cannot be instantiated and raised TypeError at runtime.
            result["notificationSource"] = ""
        if hasattr(self, "notification_type"):
            result["notificationType"] = str(self.notification_type)
        elif include_empty:
            result["notificationType"] = ""
        if hasattr(self, "payment_order_no"):
            result["paymentOrderNo"] = str(self.payment_order_no)
        elif include_empty:
            result["paymentOrderNo"] = ""
        if hasattr(self, "status"):
            result["status"] = str(self.status)
        elif include_empty:
            # BUG FIX: was `Union[str, StatusEnum]()` (see notificationSource above).
            result["status"] = ""
        if hasattr(self, "updated_at"):
            result["updatedAt"] = str(self.updated_at)
        elif include_empty:
            result["updatedAt"] = ""
        if hasattr(self, "external_id"):
            result["externalId"] = str(self.external_id)
        elif include_empty:
            result["externalId"] = ""
        if hasattr(self, "status_reason"):
            result["statusReason"] = str(self.status_reason)
        elif include_empty:
            result["statusReason"] = ""
        return result

    # endregion to methods

    # region static methods

    @classmethod
    def create(
        cls,
        created_at: str,
        id_: str,
        namespace: str,
        notification: Dict[str, Any],
        notification_source: Union[str, NotificationSourceEnum],
        notification_type: str,
        payment_order_no: str,
        status: Union[str, StatusEnum],
        updated_at: str,
        external_id: Optional[str] = None,
        status_reason: Optional[str] = None,
    ) -> PaymentNotificationInfo:
        """Construct an instance from keyword values; optional fields are set only when provided."""
        instance = cls()
        instance.created_at = created_at
        instance.id_ = id_
        instance.namespace = namespace
        instance.notification = notification
        instance.notification_source = notification_source
        instance.notification_type = notification_type
        instance.payment_order_no = payment_order_no
        instance.status = status
        instance.updated_at = updated_at
        if external_id is not None:
            instance.external_id = external_id
        if status_reason is not None:
            instance.status_reason = status_reason
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> PaymentNotificationInfo:
        """Deserialize from a camelCase dict produced by the service."""
        instance = cls()
        if not dict_:
            return instance
        if "createdAt" in dict_ and dict_["createdAt"] is not None:
            instance.created_at = str(dict_["createdAt"])
        elif include_empty:
            instance.created_at = ""
        if "id" in dict_ and dict_["id"] is not None:
            instance.id_ = str(dict_["id"])
        elif include_empty:
            instance.id_ = ""
        if "namespace" in dict_ and dict_["namespace"] is not None:
            instance.namespace = str(dict_["namespace"])
        elif include_empty:
            instance.namespace = ""
        if "notification" in dict_ and dict_["notification"] is not None:
            instance.notification = {str(k0): v0 for k0, v0 in dict_["notification"].items()}
        elif include_empty:
            instance.notification = {}
        if "notificationSource" in dict_ and dict_["notificationSource"] is not None:
            instance.notification_source = str(dict_["notificationSource"])
        elif include_empty:
            # BUG FIX: was `Union[str, NotificationSourceEnum]()` — not instantiable.
            instance.notification_source = ""
        if "notificationType" in dict_ and dict_["notificationType"] is not None:
            instance.notification_type = str(dict_["notificationType"])
        elif include_empty:
            instance.notification_type = ""
        if "paymentOrderNo" in dict_ and dict_["paymentOrderNo"] is not None:
            instance.payment_order_no = str(dict_["paymentOrderNo"])
        elif include_empty:
            instance.payment_order_no = ""
        if "status" in dict_ and dict_["status"] is not None:
            instance.status = str(dict_["status"])
        elif include_empty:
            # BUG FIX: was `Union[str, StatusEnum]()` — not instantiable.
            instance.status = ""
        if "updatedAt" in dict_ and dict_["updatedAt"] is not None:
            instance.updated_at = str(dict_["updatedAt"])
        elif include_empty:
            instance.updated_at = ""
        if "externalId" in dict_ and dict_["externalId"] is not None:
            instance.external_id = str(dict_["externalId"])
        elif include_empty:
            instance.external_id = ""
        if "statusReason" in dict_ and dict_["statusReason"] is not None:
            instance.status_reason = str(dict_["statusReason"])
        elif include_empty:
            instance.status_reason = ""
        return instance

    @classmethod
    def create_many_from_dict(cls, dict_: dict, include_empty: bool = False) -> Dict[str, PaymentNotificationInfo]:
        # BUG FIX: iterating a dict yields keys only; unpacking `k, v` requires .items().
        return {k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_.items()} if dict_ else {}

    @classmethod
    def create_many_from_list(cls, list_: list, include_empty: bool = False) -> List[PaymentNotificationInfo]:
        return [cls.create_from_dict(i, include_empty=include_empty) for i in list_] if list_ else []

    @classmethod
    def create_from_any(cls, any_: any, include_empty: bool = False, many: bool = False) -> Union[PaymentNotificationInfo, List[PaymentNotificationInfo], Dict[Any, PaymentNotificationInfo]]:
        if many:
            if isinstance(any_, dict):
                return cls.create_many_from_dict(any_, include_empty=include_empty)
            elif isinstance(any_, list):
                return cls.create_many_from_list(any_, include_empty=include_empty)
            else:
                raise ValueError()
        else:
            return cls.create_from_dict(any_, include_empty=include_empty)

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        return {
            "createdAt": "created_at",
            "id": "id_",
            "namespace": "namespace",
            "notification": "notification",
            "notificationSource": "notification_source",
            "notificationType": "notification_type",
            "paymentOrderNo": "payment_order_no",
            "status": "status",
            "updatedAt": "updated_at",
            "externalId": "external_id",
            "statusReason": "status_reason",
        }

    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        return {
            "createdAt": True,
            "id": True,
            "namespace": True,
            "notification": True,
            "notificationSource": True,
            "notificationType": True,
            "paymentOrderNo": True,
            "status": True,
            "updatedAt": True,
            "externalId": False,
            "statusReason": False,
        }

    @staticmethod
    def get_enum_map() -> Dict[str, List[Any]]:
        return {
            "notificationSource": ["WALLET", "XSOLLA", "ADYEN", "STRIPE", "CHECKOUT", "ALIPAY", "WXPAY", "PAYPAL"],
            "status": ["PROCESSED", "ERROR", "WARN", "IGNORED"],
        }

    # endregion static methods
|
# Letter counts for the British-English spelling of numbers (Project Euler 17 style:
# "and" is used, spaces and hyphens are not counted).
NAMES = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
         'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
         'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']
NAMES_LENGTH = [len(name) for name in NAMES]
NAMES_TENS = ['', 'ten', 'twenty', 'thirty', 'forty', 'fifty', 'sixty',
              'seventy', 'eighty', 'ninety']
NAMES_TENS_LENGTH = [len(name) for name in NAMES_TENS]


def letters_in_number(n: int) -> int:
    """Return the number of letters in the English words for n (1 <= n <= 1000).

    Raises:
        ValueError: if n is outside the supported range.
    """
    # BUG FIX: the guard used `and`, which is never True, so out-of-range
    # inputs fell through (n > 1000 silently returned None).
    if n <= 0 or n > 1000:
        raise ValueError(f'Unsupported input {n}, should be 1 <= n <= 1000.')
    elif n < 20:
        return NAMES_LENGTH[n]
    elif n < 100:
        return NAMES_LENGTH[n % 10] + NAMES_TENS_LENGTH[n // 10]
    elif n < 1000:
        if n % 100 == 0:
            return NAMES_LENGTH[n // 100] + len('hundred')
        else:
            # e.g. "three hundred and forty-two"
            return NAMES_LENGTH[n // 100] + len('hundred') + len('and') + \
                letters_in_number(n % 100)
    else:  # n == 1000
        return len('one') + len('thousand')


def solve(bound: int = 1000) -> int:
    """Return the total letter count for all numbers from 1 to bound inclusive."""
    # BUG FIX: the return annotation said `str`, but the function returns an int.
    return sum(letters_in_number(n) for n in range(1, bound + 1))
|
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for Cvx IO functionality
"""
# Import the optional cvx dependency, failing with setup instructions if absent.
try:
    from cvx.img_loader import ImgLoader
    from cvx.img_processor import ImgProcessor
except ModuleNotFoundError as err:
    # Chain the original error so the traceback shows which import failed.
    raise ModuleNotFoundError("Cvx could not be found. Please setup cvx from:"
                              " https://github.com/jtuyls/cvx and add to"
                              " PYTHONPATH") from err
from event.models import Worker
from django.http import JsonResponse
from django.shortcuts import render
# Create your views here.
def Worker_List(request, event_id, name):
    """Return a JSON map of workers at an event whose last name contains `name`.

    Keys are enumeration indices; values are per-worker detail dicts.
    An empty object is returned when `name` is None.
    """
    result = {}
    if name is not None:
        matches = Worker.objects.filter(last_name__icontains=name, working_at=event_id)
        result = {
            index: {
                'id': worker.id,
                'first_name': worker.first_name,
                'last_name': worker.last_name,
                'date_of_birth': worker.date_of_birth,
                'employer': worker.employer.name,
                'url': worker.get_absolute_url(),
            }
            for index, worker in enumerate(matches)
        }
    return JsonResponse(result)
def CheckStaffPayrollID(request, payroll_id):
    """Return {'exists': bool} indicating whether a worker with this payroll id exists."""
    found = (payroll_id is not None
             and Worker.objects.filter(payroll_id=payroll_id).exists())
    return JsonResponse({'exists': found})
|
from os.path import join
from pydantic import BaseSettings
import appdirs
APP_NAME = "iscc-registry"
# Per-user data directory for this application (platform dependent).
APP_DIR = appdirs.user_data_dir(appname=APP_NAME)
ENV_PATH = join(APP_DIR, ".env")
__version__ = "0.1.0"

# Ledger-ID Ethereum: single byte 0x40 marking the Ethereum ledger.
LEDGER_ID_ETH = (0b01000000).to_bytes(1, "big")
class Settings(BaseSettings):
    """Application settings, loaded from the environment / .env file via pydantic."""
    # Multiaddr of the local IPFS HTTP API.
    ipfs_address: str = "/ip4/127.0.0.1/tcp/5001/http"
    # JSON-RPC endpoint of the Ethereum node (default: local Ganache).
    web3_address: str = "http://127.0.0.1:7545"
    # Address of the registry smart contract; empty until configured.
    contract_address: str = ""
    # Directory for the local database files.
    db_dir: str = APP_DIR
    verification_domain: str = ""
# Singleton settings instance, populated from the .env file in the app data dir.
settings = Settings(_env_file=ENV_PATH)
|
# coding=utf-8
# Creation date: Oct 19, 2020
# Creation time: 10:22
# Creator: SteamPeKa
import warnings
from typing import TypeVar, Generic, Iterable, Union
import numpy
VT = TypeVar("VT")


class AbstractMetric(Generic[VT], object):
    """Base class for pairwise distance metrics over values of type VT.

    Subclasses implement ``__call__(value1, value2) -> float``.
    """

    def __init__(self, *args, **kwargs):
        # Accepts and ignores extra arguments so all subclasses share a
        # uniform construction interface (see get_metric()).
        return

    def __call__(self, value1: VT, value2: VT) -> float:
        raise NotImplementedError(
            "Abstract method of class AbstractMetric call from class {}".format(self.__class__.__name__)
        )

    def get_metric_tensor(self, possible_values: Iterable[VT], symmetric=True):
        """Return the matrix of pairwise distances over possible_values.

        When symmetric is False, only entries strictly above the diagonal
        (column index > row index) are computed; the rest are 0.0.
        """
        # ROBUSTNESS FIX: possible_values is iterated multiple times below; a
        # one-shot iterator (e.g. a generator) would be silently exhausted
        # after the first row, producing a malformed tensor.
        values = list(possible_values)
        if symmetric:
            return numpy.array(
                [[self(row_value, column_value) for column_value in values] for row_value in values]
            )
        return numpy.array([[self(row_value, column_value) if k > c else 0.0
                             for k, column_value in enumerate(values)]
                            for c, row_value in enumerate(values)])
class NominalMetric(AbstractMetric[VT]):
    """Identity metric: distance 0.0 for equal values, 1.0 otherwise."""

    def __call__(self, value1: VT, value2: VT) -> float:
        return float(value1 != value2)
class IntervalMetric(AbstractMetric[VT]):
    """Squared-difference metric for interval-scale data."""

    def __call__(self, value1: VT, value2: VT) -> float:
        difference = value1 - value2
        return difference ** 2
class RatioMetric(AbstractMetric[VT]):
    """Squared relative-difference metric for ratio-scale data."""

    def __call__(self, value1: VT, value2: VT) -> float:
        if value1 == value2:
            # short-circuit also avoids 0/0 when both values are zero
            return 0.0
        relative = (value1 - value2) / (value1 + value2)
        return relative ** 2
class CircularMetric(AbstractMetric[VT]):
    """Metric for circular data divided into a fixed number of intervals.

    Distance is sin^2(pi * (v1 - v2) / U), where U is the interval count,
    so values U apart wrap around to distance 0.
    """
    # noinspection PyUnusedLocal
    def __init__(self, possible_values_count: int = None, *args, **kwargs):
        """possible_values_count: number of intervals on the circle (natural number, required)."""
        super().__init__()
        if possible_values_count is None:
            raise ValueError("CircularMetric has to be initialised with number of possible intervals")
        elif not isinstance(possible_values_count, int):
            raise ValueError("Number of possible intervals have to be a natural number")
        elif possible_values_count < 1:
            raise ValueError("Number of possible intervals have to be a natural number")
        # name-mangled to _CircularMetric__U; the circle's interval count
        self.__U = possible_values_count

    def __call__(self, value1: VT, value2: VT) -> float:
        if value1 == value2:
            return 0.0
        # True when value1 - value2 is (numerically) a whole number of intervals
        difference_is_int = abs((value1 - value2) - int(value1 - value2)) < 1e-8
        # Out-of-range or non-integral inputs only warn; the value is still computed.
        if any([value1 > self.__U,
                value2 > self.__U,
                abs(value1 - value2) > self.__U,
                not difference_is_int]):
            warnings.warn(f"Input arguments for CircularMetric({value1},{value2})"
                          f" is not match the number of intervals of the circle {self.__U}")
        return numpy.square(numpy.sin(numpy.pi * ((value1 - value2) / self.__U)))
def get_metric(metric: Union[None, AbstractMetric, str], *args, **kwargs) -> AbstractMetric:
    """Resolve a metric specifier into an AbstractMetric instance.

    Accepts None (nominal default), an existing metric instance (returned
    as-is), or any prefix of a metric name: "nominal", "interval",
    "ratio", "circular". Extra args are forwarded to the constructor.
    """
    if metric is None:
        return NominalMetric(*args, **kwargs)
    if isinstance(metric, AbstractMetric):
        return metric
    if isinstance(metric, str):
        if len(metric) == 0:
            raise ValueError("Empty metric-name string!")
        # prefix match, in the same order as the original if/elif chain
        for full_name, factory in (("nominal", NominalMetric),
                                   ("interval", IntervalMetric),
                                   ("ratio", RatioMetric),
                                   ("circular", CircularMetric)):
            if full_name.startswith(metric):
                return factory(*args, **kwargs)
        raise ValueError("Unsupported metric name: {}".format(metric))
    raise ValueError("Unsupported metric-name value type: {} ({})".format(type(metric), str(metric)))
|
import requests
import random
import string
# Generate tlk.io session cookies and append them to cookies.txt.
amount = int(input("Amount To Generate: "))

for i in range(amount):
    # random suffixes so every generated participant looks unique
    random_number = random.randint(1, 999999)
    random_letters = ('').join(random.choices(
        string.ascii_letters, k=25))

    bypass = requests.Session()
    generate_payload = {
        "nickname": f"fake{random_number}"
    }
    header = {
        # random alternative link to get the cookies
        "referer": f"https://tlk.io/{random_letters}"
    }
    generate = bypass.post("https://tlk.io/api/participant",
                           data=generate_payload, headers=header)
    print(f"Generated Cookie: {i}")

    info = generate.json()
    # BUG FIX: the file was opened with plain open()/close(); if .json() or a
    # write raised, the handle leaked. The context manager guarantees closing.
    with open("cookies.txt", "a") as f:
        for cookie in generate.cookies:
            f.write(f"""
Cookie: _tlkio_session={cookie.value}
Token: {info['token']}
Name: fake{random_number}
""")
|
"""
Update task priority.
Initial delay: rand(1 minute)
Periodic delay: 5 minutes
"""
import logging
import random
import time
import asyncio
from collections import defaultdict
from tornado.ioloop import IOLoop
from iceprod.server import GlobalID
from iceprod.server.priority import Priority
logger = logging.getLogger('update_task_priority')
def update_task_priority(module):
    """
    Initial entrypoint.

    Args:
        module (:py:class:`iceprod.server.modules.schedule`): schedule module
    """
    # stagger startup: wait a random 1-10 minutes before the first run
    initial_delay = random.randint(60, 600)
    IOLoop.current().call_later(initial_delay, run, module.rest_client)
async def run(rest_client, dataset_id=None, debug=False):
    """
    Actual runtime / loop.

    Args:
        rest_client (:py:class:`iceprod.core.rest_client.Client`): rest client
        dataset_id (str): (optional) dataset id to update
        debug (bool): debug flag to propagate exceptions
    """
    start_time = time.time()
    prio = Priority(rest_client)
    try:
        # fetch all non-terminal tasks; only the fields needed below
        args = {
            'status': 'waiting|queued|processing|reset',
            'keys': 'task_id|depends|dataset_id',
        }
        if dataset_id:
            ret = await rest_client.request('GET', f'/datasets/{dataset_id}/tasks', args)
            tasks = ret.values()
        else:
            # NOTE(review): `url` is never used, and dataset_id is falsy in this
            # branch so the f-string interpolates None — dead code; confirm and remove.
            url = f'/datasets/{dataset_id}/tasks'
            ret = await rest_client.request('GET', '/tasks', args)
            tasks = ret['tasks']

        async def check_deps(task):
            # Return the task if every dependency is complete, else None.
            dep_futures = []
            for dep in task['depends']:
                t = asyncio.create_task(rest_client.request('GET', f'/tasks/{dep}'))
                dep_futures.append(t)
            for ret in await asyncio.gather(*dep_futures):
                if ret['status'] != 'complete':
                    logger.info('dependency not met for task %s', task['task_id'])
                    return None
            return task

        # check dependencies
        # throttle to at most 20 concurrent dependency checks
        futures = set()
        tasks2 = []
        for task in tasks:
            if len(futures) >= 20:
                done, pending = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
                for t in done:
                    ret = await t
                    if ret:
                        tasks2.append(ret)
                futures = pending
            t = asyncio.create_task(check_deps(task))
            futures.add(t)
        # drain the remaining in-flight checks
        while futures:
            done, pending = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
            for t in done:
                ret = await t
                if ret:
                    tasks2.append(ret)
            futures = pending
        logger.warning(f'len(tasks) = {len(tasks)}')
        logger.warning(f'len(tasks2) = {len(tasks2)}')

        # update priorities
        # NOTE(review): this loop iterates `tasks`, not the dependency-filtered
        # `tasks2` built above — tasks2 is otherwise unused. Confirm whether the
        # filter was meant to gate the priority updates.
        futures = set()
        for task in tasks:
            if len(futures) >= 20:
                done, pending = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
                futures = pending
            p = await prio.get_task_prio(task['dataset_id'], task['task_id'])
            logger.info('updating priority for %s.%s = %.4f', task['dataset_id'], task['task_id'], p)
            t = asyncio.create_task(rest_client.request('PATCH', f'/tasks/{task["task_id"]}', {'priority': p}))
            futures.add(t)
        while futures:
            # no return_when: wait for all remaining PATCHes to finish
            done, pending = await asyncio.wait(futures)
            futures = pending
    except Exception:
        logger.error('error updating task priority', exc_info=True)
        if debug:
            raise
    # run again after 4 hour delay
    stop_time = time.time()
    delay = max(3600*4 - (stop_time-start_time), 600)
    IOLoop.current().call_later(delay, run, rest_client)
|
import os
import urllib
import urllib.parse

import tornado.log
from tornado import web
from traitlets.config import Application

from notebook.utils import (
    url_path_join, url_escape
)
from notebook.base.handlers import (
    IPythonHandler, FilesRedirectHandler
)

from biokbase.auth import (
    get_user_info,
    init_session_env
)
from biokbase.narrative.common.kblogging import (
    get_logger, log_event
)
from biokbase.narrative.common.util import kbase_env
HTTPError = web.HTTPError

app_log = tornado.log.app_log  # alias
# BUG FIX: `initialized` is a classmethod on traitlets' Application; the bare
# attribute is a bound method and therefore always truthy, so the branch ran
# unconditionally. It must be called.
if Application.initialized():
    app_log = Application.instance().log

g_log = get_logger("biokbase.narrative")
# name of the cookie that carries the KBase auth token
auth_cookie_name = "kbase_session"
class NarrativeHandler(IPythonHandler):
    def get(self, path):
        """
        Inject the user's KBase cookie before trying to look up a file.

        One of our big use cases bypasses the typical Jupyter login mechanism,
        so we authenticate from the kbase_session cookie directly, then render
        the notebook template (or redirect to /files/ for non-notebooks).
        """
        client_ip = self.request.remote_ip
        http_headers = self.request.headers
        ua = http_headers.get('User-Agent', 'unknown')

        auth_cookie = self.cookies.get(auth_cookie_name, None)
        if auth_cookie:
            # BUG FIX: urllib.unquote is Python 2 only; on Python 3 it lives
            # in urllib.parse.
            token = urllib.parse.unquote(auth_cookie.value)
        else:
            raise web.HTTPError(status_code=401,
                                log_message='No auth cookie, denying access',
                                reason='Authorization required for Narrative access')
        if token != kbase_env.auth_token:
            init_session_env(get_user_info(token), client_ip)
            log_event(g_log, 'session_start', {'user': kbase_env.user, 'user_agent': ua})

        path = path.strip('/')
        cm = self.contents_manager

        # will raise 404 on not found
        try:
            model = cm.get(path, content=False)
        except web.HTTPError:
            raise
            # if e.status_code == 404 and 'files' in path.split('/'):
            #     # 404, but '/files/' in URL, let FilesRedirect take care of it
            #     return FilesRedirectHandler.redirect_to_files(self, path)
            # else:
            #     raise
        if model['type'] != 'notebook':
            # not a notebook, redirect to files
            return FilesRedirectHandler.redirect_to_files(self, path)

        name = url_escape(path.rsplit('/', 1)[-1])
        path = url_escape(path)
        self.write(
            self.render_template(
                'notebook.html',
                notebook_path=path,
                # BUG FIX: was `notebook_name=path`; `name` (the escaped
                # basename computed above) was otherwise unused.
                notebook_name=name,
                kill_kernel=False,
                mathjax_url=self.mathjax_url
            )
        )
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance.
    """
    web_app = nb_server_app.web_app
    # route narrative object references like "ws.123.obj.45" to our handler
    route_pattern = url_path_join(web_app.settings['base_url'], r'(ws\.\d+\.obj\.\d+.*)')
    web_app.add_handlers('.*$', [(route_pattern, NarrativeHandler)])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 10:48:38 2021
@author: rgonzalez
"""
import qiskit.aqua.components.optimizers as aqoptim
import sys
import numpy as np
# Optimizer names accepted by the %optimizer block's method= option.
optimizer_avail = ['cg',
                   'slsqp',
                   'l-bfgs-b',
                   'cobyla',
                   'nelder-mead',
                   'p-bfgs',
                   'powell',
                   'spsa',
                   'tnc',
                   'nft',
                   'gsls',
                   'adam',
                   'amsgrad',
                   'aqgd']
def set_cg(optimizer_opts):
    """Build a Conjugate Gradient optimizer from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    def _value(prefix):
        # value of the first "<prefix>=<value>" entry, or None when absent
        hits = [opt for opt in optimizer_opts if opt.startswith(prefix)]
        return hits[0].split('=')[1].strip('\n') if hits else None

    raw_maxiter = _value('maxiter')
    maxiter = int(raw_maxiter) if raw_maxiter is not None else 25000

    raw_disp = _value('print')
    if raw_disp is None:
        disp = True
    elif raw_disp == 'true':
        disp = True
    elif raw_disp == 'false':
        disp = False
    else:
        print('OptimizerError: Invalid option in ConjugateGradient print option')
        sys.exit()

    raw_conv = _value('conv')
    gtol = 10 ** (-int(raw_conv)) if raw_conv is not None else 10 ** (-8)

    optimizer = aqoptim.CG(maxiter=maxiter,
                           disp=disp,
                           gtol=gtol,
                           tol=None,
                           eps=1.4901161193847656e-08)
    optimizer_name = 'Conjugate Gradient'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Convergence Tolerance: ', '{:.3e}'.format(gtol)]]
    return optimizer, optimizer_name, optimizer_prints
def set_slsqp(optimizer_opts):
    """Build an SLSQP optimizer from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    def _value(prefix):
        # value of the first "<prefix>=<value>" entry, or None when absent
        hits = [opt for opt in optimizer_opts if opt.startswith(prefix)]
        return hits[0].split('=')[1].strip('\n') if hits else None

    raw_maxiter = _value('maxiter')
    maxiter = int(raw_maxiter) if raw_maxiter is not None else 25000

    raw_disp = _value('print')
    if raw_disp is None:
        disp = True
    elif raw_disp == 'true':
        disp = True
    elif raw_disp == 'false':
        disp = False
    else:
        print('OptimizerError: Invalid option in SLSQP print option')
        sys.exit()

    raw_conv = _value('conv')
    ftol = 10 ** (-int(raw_conv)) if raw_conv is not None else 10 ** (-8)

    optimizer = aqoptim.SLSQP(maxiter=maxiter,
                              disp=disp,
                              ftol=ftol,
                              tol=None,
                              eps=1.4901161193847656e-08)
    optimizer_name = 'SLSQP'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Convergence Tolerance: ', '{:.3e}'.format(ftol)]]
    return optimizer, optimizer_name, optimizer_prints
def set_lbfgsb(optimizer_opts):
    """Build an L-BFGS-B optimizer from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    def _value(prefix):
        # value of the first "<prefix>=<value>" entry, or None when absent
        hits = [opt for opt in optimizer_opts if opt.startswith(prefix)]
        return hits[0].split('=')[1].strip('\n') if hits else None

    raw_maxiter = _value('maxiter')
    maxiter = int(raw_maxiter) if raw_maxiter is not None else 25000
    raw_maxfun = _value('maxfun')
    maxfun = int(raw_maxfun) if raw_maxfun is not None else 25000

    optimizer = aqoptim.L_BFGS_B(maxiter=maxiter,
                                 maxfun=maxfun,
                                 factr=10,
                                 iprint=1,
                                 epsilon=1.0e-09)
    # factr=10 corresponds to ftol = 10 * machine epsilon (reported below)
    ftol = 10 * np.finfo(float).eps
    optimizer_name = 'L-BFGS-B'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Maximum Function Evaluations: ', maxfun],
                        ['Convergence Tolerance: ', '{:.3e}'.format(ftol)]]
    return optimizer, optimizer_name, optimizer_prints
def set_cobyla(optimizer_opts):
    """Build a COBYLA optimizer from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    def _value(prefix):
        # value of the first "<prefix>=<value>" entry, or None when absent
        hits = [opt for opt in optimizer_opts if opt.startswith(prefix)]
        return hits[0].split('=')[1].strip('\n') if hits else None

    raw_maxiter = _value('maxiter')
    maxiter = int(raw_maxiter) if raw_maxiter is not None else 25000

    raw_disp = _value('print')
    if raw_disp is None:
        disp = True
    elif raw_disp == 'true':
        disp = True
    elif raw_disp == 'false':
        disp = False
    else:
        print('OptimizerError: Invalid option in COBYLA print option')
        sys.exit()

    raw_conv = _value('conv')
    gtol = 10 ** (-int(raw_conv)) if raw_conv is not None else 10 ** (-8)

    # NOTE(review): the conv tolerance is fed to rhobeg (the initial trust
    # region size), not a convergence parameter — verify this is intended.
    optimizer = aqoptim.COBYLA(maxiter=maxiter,
                               disp=disp,
                               rhobeg=gtol)
    optimizer_name = 'COBYLA'
    optimizer_prints = [['Maximum Iterations: ', maxiter]]
    return optimizer, optimizer_name, optimizer_prints
def set_nm(optimizer_opts):
    """Build a Nelder-Mead optimizer from %optimizer option strings.

    Recognised options: maxiter=, maxfun=, conv=, print=, adapt=.
    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    maxiter = [opt for opt in optimizer_opts if opt.startswith('maxiter')]
    if len(maxiter) != 0:
        maxiter = int(maxiter[0].split('=')[1].strip('\n'))
    else:
        maxiter = 25000
    maxfun = [opt for opt in optimizer_opts if opt.startswith('maxfun')]
    if len(maxfun) != 0:
        maxfun = int(maxfun[0].split('=')[1].strip('\n'))
    else:
        maxfun = 25000
    gtol = [opt for opt in optimizer_opts if opt.startswith('conv')]
    if len(gtol) != 0:
        gtol = int(gtol[0].split('=')[1].strip('\n'))
        gtol = 10**(-gtol)
    else:
        gtol = 10**(-8)
    disp = [opt for opt in optimizer_opts if opt.startswith('print')]
    if len(disp) != 0:
        disp = disp[0].split('=')[1].strip('\n')
        if disp == 'true':
            disp = True
        elif disp == 'false':
            disp = False
        else:
            print('OptimizerError: Invalid option in NelderMead print option')
            sys.exit()
    else:
        disp = True
    adaptive = [opt for opt in optimizer_opts if opt.startswith('adapt')]
    if len(adaptive) != 0:
        adaptive = adaptive[0].split('=')[1].strip('\n')
        if adaptive == 'true':
            adaptive = True
        elif adaptive == 'false':
            adaptive = False
        else:
            # BUG FIX: this message said "print option" (copy-paste from the
            # print parsing above) although it reports a bad adapt= value.
            print('OptimizerError: Invalid option in Nelder-Mead adapt option')
            sys.exit()
    else:
        adaptive = False
    optimizer = aqoptim.NELDER_MEAD(maxiter=maxiter,
                                    maxfev=maxfun,
                                    disp=disp,
                                    xatol=gtol,
                                    adaptive=adaptive)
    optimizer_name = 'Nelder-Mead'
    # BUG FIX: the original used `==` (a discarded comparison) instead of `=`,
    # so the summary table showed the raw bool instead of the display string.
    if adaptive:
        adaptive = 'True'
    else:
        adaptive = 'False'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Maximum Function Evaluations: ', maxfun],
                        ['Convergence Tolerance: ', '{:.3e}'.format(gtol)],
                        ['Adaptive Method: ', adaptive]]
    return optimizer, optimizer_name, optimizer_prints
def set_pbfgs(optimizer_opts):
    """Build a parallel BFGS optimizer from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    from psutil import cpu_count
    numprocs = cpu_count()

    hits = [opt for opt in optimizer_opts if opt.startswith('maxfun')]
    maxfun = int(hits[0].split('=')[1].strip('\n')) if hits else 25000

    optimizer = aqoptim.P_BFGS(maxfun=maxfun,
                               factr=10,
                               iprint=1,
                               max_processes=numprocs)
    # factr=10 corresponds to ftol = 10 * machine epsilon (reported below)
    ftol = 10 * np.finfo(float).eps
    optimizer_name = 'P-BFGS'
    optimizer_prints = [['Maximum Function Evaluations: ', maxfun],
                        ['Convergence Tolerance: ', '{:.3e}'.format(ftol)]]
    return optimizer, optimizer_name, optimizer_prints
def set_powell(optimizer_opts):
    """Build a Powell optimizer from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    def _value(prefix):
        # value of the first "<prefix>=<value>" entry, or None when absent
        hits = [opt for opt in optimizer_opts if opt.startswith(prefix)]
        return hits[0].split('=')[1].strip('\n') if hits else None

    raw_maxiter = _value('maxiter')
    maxiter = int(raw_maxiter) if raw_maxiter is not None else 25000
    raw_maxfun = _value('maxfun')
    maxfun = int(raw_maxfun) if raw_maxfun is not None else 25000

    raw_conv = _value('conv')
    gtol = 10 ** (-int(raw_conv)) if raw_conv is not None else 10 ** (-8)

    raw_disp = _value('print')
    if raw_disp is None:
        disp = True
    elif raw_disp == 'true':
        disp = True
    elif raw_disp == 'false':
        disp = False
    else:
        print('OptimizerError: Invalid option in Powell print option')
        sys.exit()

    optimizer = aqoptim.POWELL(maxiter=maxiter,
                               maxfev=maxfun,
                               disp=disp,
                               xtol=gtol)
    optimizer_name = 'Powell'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Maximum Function Evaluations: ', maxfun],
                        ['Convergence Tolerance: ', '{:.3e}'.format(gtol)]]
    return optimizer, optimizer_name, optimizer_prints
def set_spsa(optimizer_opts):
    """Build an SPSA optimizer from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    hits = [opt for opt in optimizer_opts if opt.startswith('maxiter')]
    maxiter = int(hits[0].split('=')[1].strip('\n')) if hits else 25000

    optimizer = aqoptim.SPSA(maxiter=maxiter,
                             save_steps=1,
                             last_avg=1,
                             c0=0.6283185307179586,
                             c1=0.1,
                             c2=0.602,
                             c3=0.101,
                             c4=0,
                             skip_calibration=False)
    optimizer_name = 'SPSA'
    optimizer_prints = [['Maximum Iterations: ', maxiter]]
    return optimizer, optimizer_name, optimizer_prints
def set_tnc(optimizer_opts):
    """Build a Truncated Newton optimizer from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    def _value(prefix):
        # value of the first "<prefix>=<value>" entry, or None when absent
        hits = [opt for opt in optimizer_opts if opt.startswith(prefix)]
        return hits[0].split('=')[1].strip('\n') if hits else None

    raw_maxiter = _value('maxiter')
    maxiter = int(raw_maxiter) if raw_maxiter is not None else 25000

    raw_disp = _value('print')
    if raw_disp is None:
        disp = True
    elif raw_disp == 'true':
        disp = True
    elif raw_disp == 'false':
        disp = False
    else:
        print('OptimizerError: Invalid option in Truncated Newton print option')
        sys.exit()

    raw_conv = _value('conv')
    gtol = 10 ** (-int(raw_conv)) if raw_conv is not None else 10 ** (-8)

    optimizer = aqoptim.TNC(maxiter=maxiter,
                            disp=disp,
                            gtol=gtol,
                            accuracy=0,
                            ftol=-1,
                            xtol=-1,
                            eps=1.4901161193847656e-08)
    optimizer_name = 'Truncated Newton'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Convergence Tolerance: ', '{:.3e}'.format(gtol)]]
    return optimizer, optimizer_name, optimizer_prints
def set_nft(optimizer_opts):
    """Build a Nakanishi-Fujii-Todo optimizer from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    def _value(prefix):
        # value of the first "<prefix>=<value>" entry, or None when absent
        hits = [opt for opt in optimizer_opts if opt.startswith(prefix)]
        return hits[0].split('=')[1].strip('\n') if hits else None

    raw_maxiter = _value('maxiter')
    maxiter = int(raw_maxiter) if raw_maxiter is not None else 25000
    raw_maxfun = _value('maxfun')
    maxfun = int(raw_maxfun) if raw_maxfun is not None else 25000

    raw_disp = _value('print')
    if raw_disp is None:
        disp = True
    elif raw_disp == 'true':
        disp = True
    elif raw_disp == 'false':
        disp = False
    else:
        print('OptimizerError: Invalid option in NFT print option')
        sys.exit()

    optimizer = aqoptim.NFT(maxiter=maxiter,
                            maxfev=maxfun,
                            disp=disp,
                            reset_interval=32)
    optimizer_name = 'Nakanishi-Fujii-Todo'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Maximum Function Evaluations: ', maxfun]]
    return optimizer, optimizer_name, optimizer_prints
def set_gsls(optimizer_opts):
    """Build a Gaussian-Smoothed Line Search optimizer from %optimizer option strings.

    Recognised options: maxiter=, maxfun=, print=.
    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    maxiter = [opt for opt in optimizer_opts if opt.startswith('maxiter')]
    if len(maxiter) != 0:
        maxiter = int(maxiter[0].split('=')[1].strip('\n'))
    else:
        maxiter = 25000
    maxfun = [opt for opt in optimizer_opts if opt.startswith('maxfun')]
    if len(maxfun) != 0:
        maxfun = int(maxfun[0].split('=')[1].strip('\n'))
    else:
        maxfun = 25000
    disp = [opt for opt in optimizer_opts if opt.startswith('print')]
    if len(disp) != 0:
        disp = disp[0].split('=')[1].strip('\n')
        if disp == 'true':
            disp = True
        elif disp == 'false':
            disp = False
        else:
            # BUG FIX: the message said "NFT print option" — copy-pasted from
            # set_nft; this parser configures GSLS.
            print('OptimizerError: Invalid option in GSLS print option')
            sys.exit()
    else:
        disp = True
    optimizer = aqoptim.GSLS(maxiter=maxiter,
                             max_eval=maxfun,
                             disp=disp,
                             sampling_radius=1e-06,
                             sample_size_factor=1,
                             initial_step_size=0.01,
                             min_step_size=1e-10,
                             step_size_multiplier=0.4,
                             armijo_parameter=0.1,
                             min_gradient_norm=1e-08,
                             max_failed_rejection_sampling=50)
    optimizer_name = 'Gaussian-Smoothed Line Search'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Maximum Function Evaluations: ', maxfun]]
    return optimizer, optimizer_name, optimizer_prints
def set_adam(optimizer_opts):
    """Build an ADAM optimizer from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    def _value(prefix):
        # value of the first "<prefix>=<value>" entry, or None when absent
        hits = [opt for opt in optimizer_opts if opt.startswith(prefix)]
        return hits[0].split('=')[1].strip('\n') if hits else None

    raw_maxiter = _value('maxiter')
    maxiter = int(raw_maxiter) if raw_maxiter is not None else 25000

    raw_conv = _value('conv')
    gtol = 10 ** (-int(raw_conv)) if raw_conv is not None else 10 ** (-8)

    optimizer = aqoptim.ADAM(maxiter=maxiter,
                             tol=gtol,
                             lr=0.001,
                             beta_1=0.9,
                             beta_2=0.99,
                             noise_factor=1e-08,
                             eps=1e-10,
                             amsgrad=False,
                             snapshot_dir=None)
    optimizer_name = 'ADAM'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Convergence Tolerance: ', '{:.3e}'.format(gtol)]]
    return optimizer, optimizer_name, optimizer_prints
def set_amsgrad(optimizer_opts):
    """Build an AMSGRAD optimizer (ADAM with amsgrad=True) from %optimizer option strings.

    Returns (optimizer, display_name, rows_for_summary_printout).
    """
    def _value(prefix):
        # value of the first "<prefix>=<value>" entry, or None when absent
        hits = [opt for opt in optimizer_opts if opt.startswith(prefix)]
        return hits[0].split('=')[1].strip('\n') if hits else None

    raw_maxiter = _value('maxiter')
    maxiter = int(raw_maxiter) if raw_maxiter is not None else 25000

    raw_conv = _value('conv')
    gtol = 10 ** (-int(raw_conv)) if raw_conv is not None else 10 ** (-8)

    optimizer = aqoptim.ADAM(maxiter=maxiter,
                             tol=gtol,
                             lr=0.001,
                             beta_1=0.9,
                             beta_2=0.99,
                             noise_factor=1e-08,
                             eps=1e-10,
                             amsgrad=True,
                             snapshot_dir=None)
    optimizer_name = 'AMSGRAD'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Convergence Tolerance: ', '{:.3e}'.format(gtol)]]
    return optimizer, optimizer_name, optimizer_prints
def set_aqgd(optimizer_opts):
    """Build an Analytic Quantum Gradient Descent optimizer from the option
    tokens of the %optimizer block.

    Args:
        optimizer_opts: List of option strings such as 'maxiter=100',
            'print=true' or 'conv=6'.

    Returns:
        tuple: `(optimizer, optimizer_name, optimizer_prints)`.
    """
    # First `maxiter=<int>` token wins; default otherwise.
    token = next((opt for opt in optimizer_opts if opt.startswith('maxiter')), None)
    maxiter = int(token.split('=')[1].strip('\n')) if token is not None else 25000
    # `print=true|false` toggles progress output; anything else is an error.
    token = next((opt for opt in optimizer_opts if opt.startswith('print')), None)
    if token is None:
        disp = True
    else:
        flag = token.split('=')[1].strip('\n')
        if flag == 'true':
            disp = True
        elif flag == 'false':
            disp = False
        else:
            print('OptimizerError: Invalid option in AQGD print option')
            sys.exit()
    # `conv=<n>` selects a convergence tolerance of 10**(-n).
    token = next((opt for opt in optimizer_opts if opt.startswith('conv')), None)
    gtol = 10**(-int(token.split('=')[1].strip('\n'))) if token is not None else 10**(-8)
    optimizer = aqoptim.AQGD(maxiter=maxiter,
                             disp=disp,
                             tol=gtol,
                             eta=1.0,
                             momentum=0.25,
                             param_tol=gtol,
                             averaging=10)
    optimizer_name = 'Analytic Quantum Gradient Descent'
    optimizer_prints = [['Maximum Iterations: ', maxiter],
                        ['Convergence Tolerance: ', '{:.3e}'.format(gtol)]]
    return optimizer, optimizer_name, optimizer_prints
def _optim(blocks):
    """Build an optimizer from the `%optimizer` input block.

    Args:
        blocks: List of input-file blocks; the one starting with
            '%optimizer' (if any) selects and configures the optimizer.

    Returns:
        tuple: `(optimizer, optimizer_name, optimizer_prints)`.
    """
    optimizer_block = [block for block in blocks if block.startswith('%optimizer')]
    if len(optimizer_block) == 0:
        # Default: Conjugate Gradient with standard settings.
        optimizer = aqoptim.CG(maxiter=100,
                               disp=False,
                               gtol=10**(-8),
                               tol=None,
                               eps=1.4901161193847656e-08)
        optimizer_name = 'Conjugate Gradient'
        # BUG FIX: `optimizer_prints` was never assigned on this path, so the
        # final `return` raised UnboundLocalError whenever no %optimizer block
        # was present.
        optimizer_prints = [['Maximum Iterations: ', 100],
                            ['Convergence Tolerance: ', '{:.3e}'.format(10**(-8))]]
    else:
        optimizer_block = optimizer_block[0]
        optimizer_opts = optimizer_block.split(' ')
        optimizer_method = [opt for opt in optimizer_opts if opt.startswith('method')]
        if len(optimizer_method) != 0:
            optimizer_method = optimizer_method[0].split('=')[1].strip('\n')
        # ROBUSTNESS: a missing `method=` token previously fell through with
        # `optimizer` unassigned (UnboundLocalError); now it hits the same
        # clean error path as an unknown method.
        if optimizer_method not in optimizer_avail:
            print('OptimizerError: Invalid optimizer selected')
            sys.exit()
        # Dispatch table replaces the long if/elif chain.
        setters = {
            'cg': set_cg,
            'slsqp': set_slsqp,
            'l-bfgs-b': set_lbfgsb,
            'cobyla': set_cobyla,
            'nelder-mead': set_nm,
            'p-bfgs': set_pbfgs,
            'powell': set_powell,
            'spsa': set_spsa,
            'tnc': set_tnc,
            'nft': set_nft,
            'gsls': set_gsls,
            'adam': set_adam,
            'amsgrad': set_amsgrad,
            'aqgd': set_aqgd,
        }
        optimizer, optimizer_name, optimizer_prints = setters[optimizer_method](optimizer_opts)
    return optimizer, optimizer_name, optimizer_prints
import logging
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from functools import reduce
from operator import mul
import lab as B
import numpy as np
import wbml.out
from lab.shape import Shape
from plum import Dispatcher
from .util import lazy_jnp as jnp
from .util import lazy_tf as tf
from .util import lazy_torch as torch
from .util import match, pack, unpack
__all__ = ["Provider", "Vars"]
log = logging.getLogger(__name__)
_dispatch = Dispatcher()
@_dispatch
def _assign(x: B.NPNumeric, value: B.Numeric):
    """Assign `value` to the NumPy array `x` in place and return `x`."""
    np.copyto(x, value)
    return x


@_dispatch
def _assign(x: B.TFNumeric, value: B.Numeric):
    """Assign `value` to the TensorFlow variable `x` in place and return `x`."""
    # The return value of `x.assign` tends to behave unpredictably. We therefore just
    # return `x`.
    x.assign(value)
    return x


@_dispatch
def _assign(x: B.TorchNumeric, value: B.Numeric):
    """Assign `value` to the PyTorch tensor `x` in place and return `x`."""
    if not isinstance(value, B.TorchNumeric):
        # Match the target's dtype and device before copying.
        value = torch.tensor(value, dtype=x.dtype, device=x.device)
    x.data.copy_(value)
    return x


@_dispatch
def _assign(x: B.JAXNumeric, value: B.Numeric):
    """Assign `value` for a JAX array: JAX arrays are immutable, so this
    returns a NEW array (on the active device) rather than mutating `x`."""
    return B.to_active_device(jnp.array(value, dtype=x.dtype))
class Provider(metaclass=ABCMeta):
    """Interface for objects that provide constrained variables."""

    @abstractmethod
    def unbounded(
        self,
        init=None,
        shape=None,
        dtype=None,
        name=None,
        visible=True,
    ):  # pragma: no cover
        """Get an unbounded variable.

        Args:
            init (tensor, optional): Initialisation of the variable.
            shape (tuple[int], optional): Shape of the variable. Defaults to scalar.
            dtype (data type, optional): Data type of the variable. Defaults to that
                of the storage.
            name (str, optional): Name of the variable.
            visible (bool, optional): Make the variable visible to variable-aggregating
                operations. Defaults to `True`.

        Returns:
            tensor: Variable.
        """

    def ubnd(self, *args, **kw_args):
        """Alias for :meth:`.vars.Provider.unbounded`."""
        return self.unbounded(*args, **kw_args)

    def get(self, *args, **kw_args):  # pragma: no cover
        """Deprecated alias for :meth:`.vars.Provider.unbounded`."""
        warnings.warn(
            "The method `get` is deprecated. Please use `unbounded` or `ubnd` instead.",
            category=DeprecationWarning,
        )
        return self.unbounded(*args, **kw_args)

    @abstractmethod
    def positive(
        self,
        init=None,
        shape=None,
        dtype=None,
        name=None,
        visible=True,
    ):  # pragma: no cover
        """Get a positive variable.

        Args:
            init (tensor, optional): Initialisation of the variable.
            shape (tuple[int], optional): Shape of the variable. Defaults to scalar.
            dtype (data type, optional): Data type of the variable. Defaults to that
                of the storage.
            name (str, optional): Name of the variable.
            visible (bool, optional): Make the variable visible to variable-aggregating
                operations. Defaults to `True`.

        Returns:
            tensor: Variable.
        """

    def pos(self, *args, **kw_args):
        """Alias for :meth:`.vars.Provider.positive`."""
        return self.positive(*args, **kw_args)

    @abstractmethod
    def bounded(
        self,
        init=None,
        lower=1e-4,
        upper=1e4,
        shape=None,
        dtype=None,
        name=None,
        visible=True,
    ):  # pragma: no cover
        """Get a bounded variable.

        Args:
            init (tensor, optional): Initialisation of the variable.
            lower (tensor, optional): Lower bound. Defaults to `1e-4`.
            upper (tensor, optional): Upper bound. Defaults to `1e4`.
            shape (tuple[int], optional): Shape of the variable. Defaults to scalar.
            dtype (data type, optional): Data type of the variable. Defaults to that
                of the storage.
            name (hashable, optional): Name of the variable.
            visible (bool, optional): Make the variable visible to variable-aggregating
                operations. Defaults to `True`.

        Returns:
            tensor: Variable.
        """

    def bnd(self, *args, **kw_args):
        """Alias for :meth:`.vars.Provider.bounded`."""
        return self.bounded(*args, **kw_args)

    @abstractmethod
    def lower_triangular(
        self,
        init=None,
        shape=None,
        dtype=None,
        name=None,
        visible=True,
    ):  # pragma: no cover
        """Get a lower-triangular matrix.

        Args:
            init (tensor, optional): Initialisation of the variable.
            shape (int, optional): Number of rows and columns of the matrix.
            dtype (data type, optional): Data type of the variable. Defaults to
                that of the storage.
            name (hashable, optional): Name of the variable.
            visible (bool, optional): Make the variable visible to variable-aggregating
                operations. Defaults to `True`.

        Returns:
            tensor: Variable.
        """

    def tril(self, *args, **kw_args):
        """Alias for :meth:`.vars.Provider.lower_triangular`."""
        return self.lower_triangular(*args, **kw_args)

    @abstractmethod
    def positive_definite(
        self,
        init=None,
        shape=None,
        dtype=None,
        name=None,
        visible=True,
    ):  # pragma: no cover
        """Get a positive-definite matrix.

        Args:
            init (tensor, optional): Initialisation of the variable.
            shape (int, optional): Number of rows and columns of the matrix.
            dtype (data type, optional): Data type of the variable. Defaults to
                that of the storage.
            name (hashable, optional): Name of the variable.
            visible (bool, optional): Make the variable visible to variable-aggregating
                operations. Defaults to `True`.

        Returns:
            tensor: Variable.
        """

    def pd(self, *args, **kw_args):
        """Alias for :meth:`.vars.Provider.positive_definite`."""
        return self.positive_definite(*args, **kw_args)

    @abstractmethod
    def orthogonal(
        self,
        init=None,
        shape=None,
        dtype=None,
        name=None,
        visible=True,
        method="svd",
    ):  # pragma: no cover
        """Get an orthogonal matrix.

        Args:
            init (tensor, optional): Initialisation of the variable.
            shape (int, optional): Number of rows and columns of the matrix.
            dtype (data type, optional): Data type of the variable. Defaults to
                that of the storage.
            name (hashable, optional): Name of the variable.
            visible (bool, optional): Make the variable visible to variable-aggregating
                operations. Defaults to `True`.
            method ('svd', 'expm' or 'cayley'): Parametrisation. Method of
                parametrisation. Defaults to 'svd'.

        Returns:
            tensor: Variable.
        """

    def orth(self, *args, **kw_args):
        """Alias for :meth:`.vars.Provider.orthogonal`."""
        return self.orthogonal(*args, **kw_args)

    @abstractmethod
    def __getitem__(self, name):  # pragma: no cover
        """Get a variable by name.

        Args:
            name (hashable): Name of variable.

        Returns:
            tensor: Variable.
        """

    @property
    def struct(self):
        """:class:`.spec.Structlike`: Name variables by indexing into the variable
        container like a struct."""
        # We perform the import here to avoid a circular import.
        from .spec import Struct

        return Struct(self)
def _check_matrix_shape(shape, square=True):
if len(shape) != 2:
raise ValueError(f"Shape {shape} must be the shape of a matrix.")
if square and shape[0] != shape[1]:
raise ValueError(f"Shape {shape} must be square.")
def _check_init_shape(init, shape):
if init is None and shape is None:
raise ValueError(
f"The shape must be given to automatically initialise "
f"a matrix variable."
)
if shape is None:
shape = B.shape(init)
return init, shape
class Vars(Provider):
    """Variable storage.

    Args:
        dtype (data type): Data type of the variables.
        source (tensor, optional): Tensor to source variables from. Defaults to
            not being used.
    """

    def __init__(self, dtype, source=None):
        self.dtype = dtype

        # Source:
        self.source = source
        self.source_index = 0

        # Storage:
        self.vars = []
        self.transforms = []
        self.inverse_transforms = []
        self.visible = []

        # Lookup:
        self.name_to_index = OrderedDict()
        self._get_latent_vars_cache = {}

    def _resolve_dtype(self, dtype):
        """Return `dtype`, falling back to the storage data type if `None`."""
        if dtype is None:
            return self.dtype
        else:
            return dtype

    def _get_var(
        self,
        transform,
        inverse_transform,
        init,
        generate_init,
        shape,
        shape_latent,
        dtype,
        name,
        visible,
    ):
        """Create a new constrained variable, or return the existing one if
        `name` is already registered."""
        # If the name already exists, return that variable.
        try:
            return self[name]
        except KeyError:
            pass

        # A new variable will be added. Clear lookup cache.
        self._get_latent_vars_cache.clear()

        # Resolve data type.
        dtype = self._resolve_dtype(dtype)

        # If no source is provided, get the latent from the provided
        # initialiser.
        if self.source is None:
            # Resolve initialisation.
            if init is None:
                init = generate_init(shape=shape, dtype=dtype)
            else:
                init = B.cast(dtype, init)

            # Ensure that the initialisation is on the right device.
            init = B.to_active_device(init)

            # Allow broadcasting in the initialisation.
            if shape is not None:
                init = init * B.ones(B.dtype(init), *shape)

            # Double check the shape of the initialisation.
            if shape is not None and Shape(*shape) != Shape(*B.shape(init)):
                raise ValueError(
                    f"Shape of initial value {B.shape(init)} is not equal to the "
                    f"desired shape {shape}."
                )

            # Construct optimisable variable.
            latent = inverse_transform(init)
            if isinstance(self.dtype, B.TFDType):
                latent = tf.Variable(latent)
            elif isinstance(self.dtype, B.TorchDType):
                pass  # All is good in this case.
            elif isinstance(self.dtype, B.JAXDType):
                latent = jnp.array(latent)
            else:
                # Must be a NumPy data type.
                assert isinstance(self.dtype, B.NPDType)
                latent = np.array(latent)
        else:
            # Get the latent variable from the source.
            length = reduce(mul, shape_latent, 1)
            latent_flat = self.source[self.source_index : self.source_index + length]
            self.source_index += length

            # Cast to the right data type.
            latent = B.cast(dtype, B.reshape(latent_flat, *shape_latent))

        # Store transforms.
        self.vars.append(latent)
        self.transforms.append(transform)
        self.inverse_transforms.append(inverse_transform)

        # Store whether the variable is visible.
        self.visible.append(visible)

        # Get index of the variable.
        index = len(self.vars) - 1

        # Store name if given.
        if name is not None:
            self.name_to_index[name] = index

        # Generate the variable and return.
        return transform(latent)

    def unbounded(self, init=None, shape=None, dtype=None, name=None, visible=True):
        # If nothing is specified, generate a scalar.
        if init is None and shape is None:
            shape = ()

        def generate_init(shape, dtype):
            return B.randn(dtype, *shape)

        return self._get_var(
            transform=lambda x: x,
            inverse_transform=lambda x: x,
            init=init,
            generate_init=generate_init,
            shape=shape,
            shape_latent=shape,
            dtype=dtype,
            name=name,
            visible=visible,
        )

    def positive(self, init=None, shape=None, dtype=None, name=None, visible=True):
        # If nothing is specified, generate a scalar.
        if init is None and shape is None:
            shape = ()

        def generate_init(shape, dtype):
            return B.rand(dtype, *shape)

        return self._get_var(
            transform=lambda x: B.exp(x),
            inverse_transform=lambda x: B.log(x),
            init=init,
            generate_init=generate_init,
            shape=shape,
            shape_latent=shape,
            dtype=dtype,
            name=name,
            visible=visible,
        )

    def bounded(
        self,
        init=None,
        lower=1e-4,
        upper=1e4,
        shape=None,
        dtype=None,
        name=None,
        visible=True,
    ):
        # If nothing is specified, generate a scalar.
        if init is None and shape is None:
            shape = ()

        def transform(x):
            return lower + (upper - lower) / (1 + B.exp(-x))

        def inverse_transform(x):
            return B.log(x - lower) - B.log(upper - x)

        def generate_init(shape, dtype):
            return lower + B.rand(dtype, *shape) * (upper - lower)

        return self._get_var(
            transform=transform,
            inverse_transform=inverse_transform,
            init=init,
            generate_init=generate_init,
            shape=shape,
            shape_latent=shape,
            dtype=dtype,
            name=name,
            visible=visible,
        )

    def lower_triangular(
        self,
        init=None,
        shape=None,
        dtype=None,
        name=None,
        visible=True,
    ):
        init, shape = _check_init_shape(init, shape)
        _check_matrix_shape(shape)

        # Result must be square. Get a side.
        side = shape[0]

        def transform(x):
            return B.vec_to_tril(x)

        def inverse_transform(x):
            return B.tril_to_vec(x)

        def generate_init(shape, dtype):
            mat = B.randn(dtype, *shape)
            return transform(B.tril_to_vec(mat))

        # The latent is the vector of lower-triangular entries.
        shape_latent = (int(side * (side + 1) / 2),)
        return self._get_var(
            transform=transform,
            inverse_transform=inverse_transform,
            init=init,
            generate_init=generate_init,
            shape=shape,
            shape_latent=shape_latent,
            dtype=dtype,
            name=name,
            visible=visible,
        )

    def positive_definite(
        self,
        init=None,
        shape=None,
        dtype=None,
        name=None,
        visible=True,
    ):
        init, shape = _check_init_shape(init, shape)
        _check_matrix_shape(shape)

        # Result must be square. Get a side.
        side = shape[0]

        def transform(x):
            # Parametrise via a Cholesky factor with a log-diagonal.
            log_diag = x[:side]
            chol = B.vec_to_tril(x[side:], offset=-1) + B.diag(B.exp(log_diag))
            return B.matmul(chol, chol, tr_b=True)

        def inverse_transform(x):
            chol = B.cholesky(B.reg(x))
            return B.concat(B.log(B.diag(chol)), B.tril_to_vec(chol, offset=-1))

        def generate_init(shape, dtype):
            mat = B.randn(dtype, *shape)
            return B.matmul(mat, mat, tr_b=True)

        shape_latent = (int(side * (side + 1) / 2),)
        return self._get_var(
            transform=transform,
            inverse_transform=inverse_transform,
            init=init,
            generate_init=generate_init,
            shape=shape,
            shape_latent=shape_latent,
            dtype=dtype,
            name=name,
            visible=visible,
        )

    def orthogonal(
        self,
        init=None,
        shape=None,
        dtype=None,
        name=None,
        visible=True,
        method="svd",
    ):
        init, shape = _check_init_shape(init, shape)

        if method == "svd":
            _check_matrix_shape(shape, square=False)
            n, m = shape
            shape_latent = (n, m)

            # Fix singular values.
            sing_vals = B.linspace(self._resolve_dtype(dtype), 1, 2, min(n, m))

            def transform(x):
                u, s, v = B.svd(x)
                # u * v' is the closest orthogonal matrix to x in Frobenius norm.
                return B.matmul(u, v, tr_b=True)

            def inverse_transform(x):
                if n >= m:
                    return x * sing_vals[None, :]
                else:
                    return x * sing_vals[:, None]

            def generate_init(shape, dtype):
                mat = B.randn(dtype, *shape)
                return transform(mat)

        elif method == "expm":
            _check_matrix_shape(shape)
            side = shape[0]
            # Latent is the strictly-lower-triangular part of a skew matrix.
            shape_latent = (int(side * (side + 1) / 2 - side),)

            def transform(x):
                tril = B.vec_to_tril(x, offset=-1)
                skew = tril - B.transpose(tril)
                return B.expm(skew)

            def inverse_transform(x):
                return B.tril_to_vec(B.logm(x), offset=-1)

            def generate_init(shape, dtype):
                mat = B.randn(dtype, *shape)
                return transform(B.tril_to_vec(mat, offset=-1))

        elif method == "cayley":
            _check_matrix_shape(shape)
            side = shape[0]
            # Latent is the strictly-lower-triangular part of a skew matrix.
            shape_latent = (int(side * (side + 1) / 2 - side),)

            def transform(x):
                tril = B.vec_to_tril(x, offset=-1)
                skew = tril - B.transpose(tril)
                eye = B.eye(skew)
                return B.solve(eye + skew, eye - skew)

            def inverse_transform(x):
                eye = B.eye(x)
                skew = B.solve(eye + x, eye - x)
                return B.tril_to_vec(skew, offset=-1)

            def generate_init(shape, dtype):
                mat = B.randn(dtype, *shape)
                return transform(B.tril_to_vec(mat, offset=-1))

        else:
            raise ValueError(f'Unknown parametrisation "{method}".')

        return self._get_var(
            transform=transform,
            inverse_transform=inverse_transform,
            init=init,
            generate_init=generate_init,
            shape=shape,
            shape_latent=shape_latent,
            dtype=dtype,
            name=name,
            visible=visible,
        )

    def __getitem__(self, name):
        index = self.name_to_index[name]
        return self.transforms[index](self.vars[index])

    def __contains__(self, name):
        """Check if a variable exists.

        Args:
            name (str): Name of the variable.
        """
        return name in self.name_to_index

    def assign(self, name, value):
        """Assign a value to a variable.

        Args:
            name (hashable): Name of variable to assign value to.
            value (tensor): Value to assign.

        Returns:
            tensor: Assignment result.
        """
        index = self.name_to_index[name]
        # Overwrite data.
        self.vars[index] = _assign(
            self.vars[index], self.inverse_transforms[index](value)
        )
        return self.vars[index]

    def delete(self, name):
        """Delete a variable.

        Args:
            name (str): Name of the variable.
        """
        if name in self:
            i = self.name_to_index[name]

            names_to_index = {}
            for name, name_i in self.name_to_index.items():
                # Exclude the case where `name_i == i`, because that's the variable
                # that we want to delete.
                if name_i < i:
                    # The deleted variable comes after this one: index unchanged.
                    names_to_index[name] = name_i
                elif name_i > i:
                    # The deleted variable comes before this one: decrement index.
                    names_to_index[name] = name_i - 1
            self.name_to_index = names_to_index

            del self.vars[i]
            del self.transforms[i]
            del self.inverse_transforms[i]

            # We've modified the underlying lists, so it's very important to clear the
            # cache.
            self._get_latent_vars_cache.clear()

    def copy(self, detach=False, f=lambda x: x):
        """Create a copy of the variable manager that shares the variables.

        Args:
            detach (bool, optional): Detach the variables in PyTorch. Defaults
                to `False`.
            f (function, optional): Apply this function to every latent variable.

        Returns:
            :class:`.vars.Vars`: Copy.
        """
        vs = Vars(dtype=self.dtype)
        vs.transforms = list(self.transforms)
        vs.inverse_transforms = list(self.inverse_transforms)
        vs.name_to_index = OrderedDict(self.name_to_index)
        vs.visible = list(self.visible)
        vs.vars = [f(x) for x in self.vars]
        if detach:
            vs.detach()
        return vs

    def detach(self):
        """Detach all variables held in PyTorch."""
        self.vars = [v.detach() for v in self.vars]

    def requires_grad(self, value, *names):
        """Set which variables require a gradient in PyTorch.

        Args:
            value (bool): Require a gradient.
            *names (hashable): Specify variables by name.
        """
        for var in self.get_latent_vars(*names):
            var.requires_grad_(value)

    def get_latent_vars(self, *names, return_indices=False):
        """Get visible latent variables.

        If no arguments are supplied, then all visible latent variables are retrieved.
        Furthermore, the same collection of variables is guaranteed to be
        returned in the same order.

        Args:
            *names (hashable): Get variables by name.
            return_indices (bool, optional): Get the indices of the variables instead.
                Defaults to `False`.

        Returns:
            list: Matched latent variables or their indices, depending on the
                value of `indices`.
        """
        # If nothing is specified, return all latent variables.
        if len(names) == 0:
            if return_indices:
                return [i for i, t in enumerate(self.visible) if t]
            else:
                return [var for var, t in zip(self.vars, self.visible) if t]

        # Attempt to use cache.
        try:
            indices = self._get_latent_vars_cache[names]
        except KeyError:
            # Divide names into includes and excludes.
            includes, excludes = [], []
            for name in names:
                if name.startswith("-"):
                    excludes.append(name[1:])
                else:
                    includes.append(name)

            # If no name to include was specified, include all.
            if len(includes) == 0:
                includes = ["*"]

            # Collect indices of matches.
            indices = set()
            for name in includes:
                a_match = False
                for k, v in self.name_to_index.items():
                    included = match(name, k)
                    excluded = any(match(n, k) for n in excludes)
                    if included and not excluded:
                        indices |= {v}
                        a_match = True

                # Check that there was a match.
                if not a_match:
                    raise ValueError(f'No variable matching "{name}".')

            # Sort the indices for a consistent result.
            indices = sorted(indices)

            # Store in cache before proceeding.
            self._get_latent_vars_cache[names] = indices

        # Return indices if asked for. Otherwise, return variables.
        if return_indices:
            return [i for i in indices if self.visible[i]]
        else:
            return [self.vars[i] for i in indices if self.visible[i]]

    def get_vars(self, *args, **kw_args):  # pragma: no cover
        """Deprecated alias for :meth:`.vars.Vars.get_latent_vars`."""
        warnings.warn(
            "The method `get_vars` is deprecated. Please use `get_latent_vars` "
            "instead.",
            category=DeprecationWarning,
        )
        return self.get_latent_vars(*args, **kw_args)

    def get_latent_vector(self, *names):
        """Get visible latent variables stacked in a vector.

        If no arguments are supplied, then all visible latent variables are retrieved.

        Args:
            *names (hashable): Get variables by name.

        Returns:
            tensor: Vector consisting of all latent values
        """
        return pack(*self.get_latent_vars(*names))

    def get_vector(self, *args, **kw_args):  # pragma: no cover
        """Deprecated alias for :meth:`.vars.Vars.get_latent_vector`."""
        warnings.warn(
            "The method `get_vector` is deprecated. Please use `get_latent_vector` "
            "instead.",
            category=DeprecationWarning,
        )
        return self.get_latent_vector(*args, **kw_args)

    def set_latent_vector(self, values, *names, differentiable=False):
        """Set all the latent variables by values from a vector.

        If no arguments are supplied, then all latent variables are retrieved.

        Args:
            values (tensor): Vector to set the variables to.
            *names (hashable): Set variables by name.
            differentiable (bool, optional): Differentiable assignment. Defaults
                to `False`.

        Returns:
            list: Assignment results.
        """
        values = unpack(values, *map(B.shape, self.get_latent_vars(*names)))
        if differentiable:
            # Do a differentiable assignment.
            for index, value in zip(
                self.get_latent_vars(*names, return_indices=True), values
            ):
                self.vars[index] = value
            return values
        else:
            # Overwrite data.
            assignments = []
            for index, value in zip(
                self.get_latent_vars(*names, return_indices=True), values
            ):
                self.vars[index] = _assign(self.vars[index], value)
                assignments.append(self.vars[index])
            return assignments

    def set_vector(self, *args, **kw_args):  # pragma: no cover
        """Deprecated alias for :meth:`.vars.Vars.set_latent_vector`."""
        warnings.warn(
            "The method `set_vector` is deprecated. Please use `set_latent_vector` "
            "instead.",
            category=DeprecationWarning,
        )
        # BUG FIX: this previously delegated to `get_latent_vector`, which
        # silently ignored the values to assign.
        return self.set_latent_vector(*args, **kw_args)

    @property
    def names(self):
        """All available names."""
        return list(self.name_to_index.keys())

    def print(self):
        """Print all variables."""
        for name in self.names:
            wbml.out.kv(name, self[name])
|
"""
#!/usr/bin/python3
# This file attempts to listen in on the communications of the MIL Bus and log activity in a .json file
# This file is a WORK IN PROGRESS
# TODO: Replace example event with actual code that gets an event from the simulator CORRECTLY
import socket
import threading
import json
import time
import os
from datetime import datetime
#from Bus_Controller.Physical_Layer_Emulation.Communication_Socket_BC import BC_Listener
#from Bus_Controller.Message_Layer.ML_Decoder_BC import MessageLayerDecoderBC
from Bus_Controller.Physical_Layer_Emulation.Communication_Socket_BC import BC_Listener
from Bus_Controller.Message_Layer.ML_Decoder_BC import MessageLayerDecoderBC
global logger_thread
global eventdata
# Class that codes to listen in on communications over the bus, modification of Bus_Controller class
class Bus_Logger:
# Function to add date and time information to a log entry
def addtime (self, dictionary) :
"This takes a dictionary and adds entries for the date and time of function call"
now = datetime.now()
dictionary['time_year'] = now.strftime('%Y')
dictionary['time_month'] = now.strftime('%m')
dictionary['time_day'] = now.strftime('%d')
dictionary['time_hour'] = now.strftime('%H')
dictionary['time_minute'] = now.strftime('%M')
dictionary['time_second'] = now.strftime('%S')
dictionary['time_microsecond'] = now.strftime('%f')
def logevent (self, event) :
"This takes an event of received data, parsed as a dictionary, and logs it"
# Name of file should be from date
self.addtime(event)
now = datetime.now()
jsonfilename = now.strftime('%m-%d-%Y_log.json')
# Output of event to json
with open(os.getcwd() + '/io/jsons/' + jsonfilename, 'a') as event_dumped :
json.dump(event, event_dumped)
def handle_incoming_frame(self, frame):
self.logevent(MessageLayerDecoderBC().interprete_incoming_frame(frame))
def start_listener(self):
"This starts a listening thread"
# Log start of logging
logstart = { 'message' : 'Logging has started' }
self.logevent(logstart)
# Example dict of event to log (Comment out for real runs)
#event_EX = {
# 'flag1' : '01',
# 'flag2' : '11',
# 'length' : '07'
#}
#self.logevent(event_EX)
listener = BC_Listener()
listener_thread = threading.Thread(target=listener.start_listening)
listener_thread.start()
while True:
if not len(listener.data_received) == 0:
eventData = {'Message' : listener.data_received}
self.logevent(eventData)
listener.data_received.pop(0)
if __name__ == "__main__":
# Log incoming data
logger_thread = threading.Thread(target=Bus_Logger().start_listener)
logger_thread.start()
""" |
"""Record per-rank wall-clock timestamps for a toy compute/idle loop and
pickle them to `timeRecords/` for later analysis."""
import time
from mpi4py import MPI
import numpy as np
import pickle
import os

t_init = time.perf_counter()

# for Beskow
comm = MPI.COMM_WORLD
size = comm.Get_size()  # make sure nx is divisible by size
rank = comm.Get_rank()

# # for personal laptop
# rank = 0
# size = 1

nsteps = 10
# Row layout: start time, then one (compute, idle) stamp pair per step,
# then the termination time.
len_T_rank = 2 + nsteps * 2
T_rank = np.ones((1, len_T_rank))
pT = 0
T_rank[0, pT] = t_init
pT += 1
for i in range(nsteps):
    time.sleep(rank * 0.001)  # simulate "computing for some micro seconds"
    time.sleep(0.001)
    T_rank[0, pT] = time.perf_counter()
    pT += 1
    time.sleep(0.001)  # simulate idle period
    T_rank[0, pT] = time.perf_counter()
    pT += 1

# BUG FIX: every rank writes into `timeRecords/`, but only rank 0 used to
# create it, so other ranks could fail with FileNotFoundError if they reached
# their write before rank 0 created the folder. `exist_ok=True` makes the
# creation safe on all ranks.
os.makedirs('timeRecords', exist_ok=True)

if rank == 0:
    parameters = [size, nsteps, len_T_rank]
    # Context manager guarantees the file is closed even on error.
    with open('timeRecords/size_nsteps_lenTrank.txt', 'wb') as f:
        pickle.dump(parameters, f)
    # BUG FIX: the original `print(...),` trailing comma was a Python-2 idiom;
    # in Python 3 it only built a throwaway tuple. Output is unchanged.
    print('size, nsteps, lenTrank = ')
    print(parameters)

T_rank[0, pT] = time.perf_counter()  # record the termination time
with open('timeRecords/T_rank' + str(rank) + '.txt', 'wb') as f:
    pickle.dump(T_rank, f)
|
from django.db import models
import string, random, datetime
from profiles.models import UserProfile, Location, Surcharges, User
from decimal import *
from menu.models import Product, Entree, Pizza, PizzaTopping, Side
from localflavor.us.models import PhoneNumberField, USStateField, USZipCodeField
#modify this to check against prior conf orders.
def make_conf(length=8, chars=string.ascii_uppercase + string.digits):
    """Generate a random confirmation code.

    Args:
        length: Number of characters in the code. Defaults to 8.
        chars: Alphabet to draw from; uppercase letters and digits by default.

    Returns:
        str: The generated code.
    """
    picked = [random.choice(chars) for _ in range(length)]
    return ''.join(picked)
class Order(models.Model):
    """A customer order: delivery details, pricing fields and price helpers."""
    customer = models.ForeignKey(User, blank=True, null=True)
    created_date = models.DateTimeField(auto_now=False, auto_now_add=True)
    stamped = models.BooleanField(default=False)
    stamped_time = models.DateTimeField(auto_now=True, auto_now_add=False, blank=True, null=True)
    complete = models.BooleanField(default=False)
    delivery = models.BooleanField(default=False)
    delivery_available = models.BooleanField(default=False)
    location = models.ForeignKey(Location, blank=True, null=True)
    total = models.DecimalField(max_digits=20, decimal_places=2, default=0)
    subtotal = models.DecimalField(max_digits=20, decimal_places=2, default=0)
    taxes = models.DecimalField(max_digits=20, decimal_places=2, default=0)
    first_name = models.CharField(max_length=120, blank=True, null=True)
    last_name = models.CharField(max_length=120, blank=True, null=True)
    street_address = models.CharField(max_length=120, blank=True, null=True)
    city = models.CharField(max_length=120, blank=True, null=True)
    state = USStateField(blank=True, null=True)
    zipcode = USZipCodeField(blank=True, null=True)
    phone = PhoneNumberField(blank=True, null=True)
    email = models.EmailField(max_length=120, blank=True, null=True)
    note = models.TextField(max_length=1000, blank=True, null=True)
    conf_number = models.CharField(max_length=20, blank=True, null=True)

    # Delivery charge needs to be separate from lines.
    def get_delivery_charge(self):
        """Delivery fee configured for this order's location."""
        return Location.objects.get(id=str(self.location)).get_delivery_charge()

    def compute_subtotal(self):
        """Sum of line-item prices, plus the delivery charge for deliveries."""
        lines_total = sum(
            item.line_price for item in OrderLineItem.objects.filter(order=self.id)
        )
        if self.delivery:
            # Only hit the database for the delivery charge when it applies;
            # the original fetched it unconditionally and also left an unused
            # `pre_sub` variable behind.
            return lines_total + self.get_delivery_charge()
        return lines_total

    def compute_taxes(self):
        """Taxes on the subtotal at the location's tax rate, rounded to cents."""
        subtotal = self.compute_subtotal()
        loc = Surcharges.objects.get(location=self.location).location
        tax_rate = Decimal(str(loc.get_tax_rate()))
        return Decimal(round(subtotal * tax_rate, 2)).quantize(Decimal('.01'), rounding=ROUND_HALF_UP)

    def compute_total(self):
        """Subtotal plus taxes, rounded to cents."""
        return Decimal(round(self.compute_subtotal() + self.compute_taxes(), 2)).quantize(Decimal('.01'), rounding=ROUND_HALF_UP)

    class Meta:
        ordering = ['-stamped_time']

    def __unicode__(self):
        return str(str(self.created_date) + ' ' + str(self.id)) + str(self.customer)
# BUG FIX: removed leftover duplicate ITEM_TYPES tuple entries that sat at
# module level between the Order and OrderLineItem classes. They referenced
# names (PIZZA, SIDE, SOUP, ...) that are only defined inside OrderLineItem,
# so importing this module raised NameError. The canonical choices live on
# OrderLineItem.ITEM_TYPES.
class OrderLineItem(models.Model):
    """A single product line on an order, with its type, size and toppings."""
    order = models.ForeignKey(Order)
    product = models.ForeignKey('menu.Product')
    size = models.CharField(max_length=7, blank=True, null=True)
    PIZZA = 'PIZZA'
    SIDE = 'SIDE'
    SOUP = 'SOUP'
    SALAD = 'SALAD'
    BREADSTICKS = 'BREADSTICKS'
    PASTA = 'PASTA'
    WINGS = 'WINGS'
    SANDWICH = 'SANDWICH'
    BEVERAGE = 'BEVERAGE'
    ITEM_TYPES = (
        (PIZZA, 'PIZZA'),
        (SIDE, 'SIDE'),
        (SOUP, 'SOUP'),
        (SALAD, 'SALAD'),
        (BREADSTICKS, 'BREADSTICKS'),
        (PASTA, 'PASTA'),
        (WINGS, 'WINGS'),
        (SANDWICH, 'SANDWICH'),
        (BEVERAGE, 'BEVERAGE'),
    )
    product_type = models.CharField(max_length=50, choices=ITEM_TYPES, default=PIZZA)
    qty = models.PositiveIntegerField(default=1)
    line_price = models.DecimalField(max_digits=20, decimal_places=2, blank=True, null=True)
    # BUG FIX: `null=True` has no effect on ManyToManyField (Django documents
    # it as a no-op and warns about it), so it is dropped.
    toppings = models.ManyToManyField(PizzaTopping, blank=True, related_name='topping')

    def get_price(self):
        """Unit price for this line item, looked up by product type and size.

        NOTE(review): 'ENTREE' is handled below but is not one of ITEM_TYPES,
        and any unhandled type falls through and returns `None` — confirm
        whether both are intended.
        """
        if self.product_type == 'PIZZA':
            pizza_price = Pizza.objects.get(product_id=self.product, size=self.size).get_price()
            pricing = [pizza_price]
            # Each selected topping adds its own price on top of the base pizza.
            for topping in self.toppings.all():
                pricing.append(topping.price)
            return sum(pricing)
        elif self.product_type == 'ENTREE':
            return Entree.objects.get(product_id=self.product, size=self.size).get_price()
        elif self.product_type == 'SIDE':
            return Side.objects.get(product_id=self.product, size=self.size).price

    def __unicode__(self):
        return str(self.product)
|
# Hex digit -> 4-bit binary string lookup table.
hexa = {
    '0': '0000',
    '1': '0001',
    '2': '0010',
    '3': '0011',
    '4': '0100',
    '5': '0101',
    '6': '0110',
    '7': '0111',
    '8': '1000',
    '9': '1001',
    'A': '1010',
    'B': '1011',
    'C': '1100',
    'D': '1101',
    'E': '1110',
    'F': '1111'
}

# Reverse lookup (4-bit binary string -> hex digit), built once instead of
# scanning `hexa` linearly per nibble.
_bin_to_hex = {value: key for key, value in hexa.items()}


def hexToBin(hex_number):
    """Convert a hexadecimal string to its binary-string representation.

    GENERALIZED: the original handled exactly two uppercase digits; this
    accepts any length and lowercase digits too, and is identical for
    previously-valid two-digit uppercase inputs.

    Args:
        hex_number (str): Hexadecimal digits, e.g. 'A3'.

    Returns:
        str: 4 bits per input digit, e.g. '10100011'.
    """
    return ''.join(hexa[digit.upper()] for digit in hex_number)


def binToHex(bin_number):
    """Convert a binary string of up to 8 bits to a two-digit hex string.

    The input is split into two nibbles (`bin_number[:4]` and the rest), each
    zero-padded on the left to 4 bits, exactly as the original did. A nibble
    that does not match any 4-bit pattern contributes nothing to the result
    (preserving the original's silent-skip behavior).

    Args:
        bin_number (str): Binary digits, e.g. '10100011'.

    Returns:
        str: Hexadecimal digits, e.g. 'A3'.
    """
    first = bin_number[0:4].zfill(4)
    second = bin_number[4:].zfill(4)
    return _bin_to_hex.get(first, '') + _bin_to_hex.get(second, '')
|
#!/usr/bin/env python
import time
import uuid
import argparse
import hivemind
def get_public_ip():
    """Resolve this machine's public IP address via the Google DNS provider.

    Imports `whatsmyip` lazily so the dependency is only required when the
    address actually has to be inferred.
    """
    from whatsmyip.providers import GoogleDnsProvider
    from whatsmyip.ip import get_ip

    return get_ip(GoogleDnsProvider)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--address', type=str, required=False, default=None,
                        help="this machine's network address. Use public IP for global experiments, "
                             "local address for private runs.")
    parser.add_argument('--listen_on', type=str, default='0.0.0.0:*', required=False,
                        help="'localhost' for local connections only, '0.0.0.0' for ipv4 '[::]' for ipv6")
    parser.add_argument('--refresh_period', type=float, default=30, required=False,
                        help="coordinator will fetch random keys every :this many: seconds to detect inactive peers")
    args = parser.parse_args()

    if args.address is None:
        # No address given: try to discover the public IP over DNS.
        print("No address specified. Attempting to infer address from DNS.")
        try:
            args.address = get_public_ip()
        except ImportError as e:
            # NOTE(review): only ImportError is caught here — a network
            # failure inside get_public_ip() would propagate. Confirm
            # whether that is intended.
            print("Could not infer network address, please specify --address manually.")
            exit(-1)

    # Start a DHT node; ':*' lets hivemind pick a free port for the endpoint.
    dht = hivemind.DHT(start=True, listen_on=args.listen_on, endpoint=f"{args.address}:*")
    print(f"Running DHT root at {args.address}:{dht.port}", flush=True)

    # Keep-alive loop: periodically issue a random get() so inactive peers
    # are detected; runs forever until the process is killed.
    while True:
        dht.get(uuid.uuid4().bytes, latest=True)
        time.sleep(args.refresh_period)
|
from __future__ import absolute_import
import six
import warnings
from civis._version import __version__
from civis.civis import APIClient, find, find_one
from civis import io, ml, parallel, utils
# Warn Python 2 users that support is ending.
# Bug fix: the original message read "is deprecated will be removed" —
# missing the conjunction — which shipped a garbled user-facing warning.
if six.PY2:
    warnings.warn("Support for Python 2 is deprecated and will be "
                  "removed in the next version release after "
                  "April 1, 2020.", FutureWarning)
# Explicit public API of the civis package.
__all__ = ["__version__", "APIClient", "find", "find_one", "io",
           "ml", "parallel", "utils"]
|
#This class loads all config files and tests some issue before the game begins.
import json
import pygame
from utils.io_supporter import decrypt_file
#from configString import config_string as configuration
# CONSTANTS
## CONSTANTS
# Eight-way facing/movement directions.
UP = "UP"
DOWN = "DOWN"
LEFT = "LEFT"
RIGHT = "RIGHT"
UP_LEFT = "UP_LEFT"
UP_RIGHT = "UP_RIGHT"
DOWN_LEFT = "DOWN_LEFT"
DOWN_RIGHT = "DOWN_RIGHT"

# Color definitions (RGB tuples).
BRIGHT_RED = (255, 0, 0)
RED = (200, 0, 0)
BRIGHT_GREEN = (0, 255, 0)
GREEN = (0, 200, 0)
BRIGHT_BLUE = (0, 0, 255)
BLUE = (0, 0, 200)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
PINK = (247, 41, 234)
YELLOW = (230, 255, 0)
GRAY = (200, 200, 200)
ORANGE = (250, 230, 0)
#
#configfile = "utils/config.json"
class ConfigLoader(object):
    """Loads the encrypted JSON game configuration.

    On construction the file is decrypted and parsed, then typed lists
    are built for stages, enemies, NPCs, players, portals, items,
    equipment, weapons and sprites, and *ByID lookup helpers are exposed
    for each of them. `sound_list` stays a raw id -> filename mapping and
    soundByID() turns entries into pygame Sound objects on demand.
    """

    def __init__(self, filename):
        """Decrypt and parse `filename`, then materialize the config lists.

        On IOError an error is printed and the loader is left only
        partially initialized (whatever was set before the failure).
        """
        self.filename = filename
        try:
            config = decrypt_file(filename)
            self.json_data = json.loads(config)
            # Base directories used to build full asset paths below.
            self.mapdir = self.json_data["mapdir"]
            self.soundtrackdir = self.json_data["soundtrackdir"]
            self.spritedir = self.json_data["spritedir"]
            self.soundeffectdir = self.json_data["soundeffectdir"]
            self.stages = self.json_data["stages"]
            self.stage_list = self.__stageToList()
            self.entities = self.json_data["entities"]
            self.enemy_list = self.__enemyToList()
            self.npc_list = self.__npcToList()
            self.player_list = self.__playerToList()
            self.elements = self.json_data["elements"]
            self.portal_list = self.__portalToList()
            self.items = self.json_data["item"]
            self.item_list = self.__itemToList()
            self.equips = self.json_data["equip"]
            self.helmet_list = self.__equipToList("helmet")
            self.weapons_list = self.__weaponToList("weapon")
            self.sprites = self.json_data["sprites"]
            self.ent_sprite_list = self.__spriteToList("entities")
            self.eff_sprite_list = self.__spriteToList("effects")
            self.icon_sprite_list = self.__spriteToList("icons")
            self.other_sprite_list = self.__spriteToList("others")
            # Raw id -> sound filename mapping; see soundByID().
            self.sound_list = self.json_data["sounds"]
        except IOError:
            print("Cannot open config file {}".format(filename))

    ##########################
    def stageByID(self, id):
        """Return the stage config with the given id, or None."""
        # NOTE(review): `id` shadows the builtin throughout this class.
        for stg in self.stage_list:
            if stg.id == id:
                return stg
        # Bug fix: this was a Python 2 `print` statement while the rest of
        # the file uses the function-call form; now valid on Python 3 too.
        print("no config info found for this stage id: %s" % id)
        return None

    def __stageToList(self):
        """Build stageConfigClass records from the raw stage entries."""
        list_of_stages = []
        for s in self.stages:
            list_of_stages.append(stageConfigClass(s, self.mapdir, self.soundtrackdir))
        if list_of_stages:
            return list_of_stages
        return None

    ################################# To List ################################
    def __portalToList(self):
        """Build portalConfigClass records, or None when the list is empty."""
        list_of_portals = []
        for p in self.elements['portal']:
            list_of_portals.append(portalConfigClass(p, self.spritedir))
        if list_of_portals:
            return list_of_portals
        return None

    def __entityToList(self):
        """Build records for every raw entity entry (currently unused)."""
        list_of_entities = []
        for e in self.entities:
            list_of_entities.append(entityConfigClass(e, self.spritedir))
        if list_of_entities:
            return list_of_entities
        return None

    def __enemyToList(self):
        """Build entity records for the 'enemy' group."""
        list_of_enemies = []
        for e in self.entities['enemy']:
            list_of_enemies.append(entityConfigClass(e, self.spritedir))
        if list_of_enemies:
            return list_of_enemies
        return None

    def __npcToList(self):
        """Build entity records for the 'npc' group."""
        list_of_npc = []
        for e in self.entities['npc']:
            list_of_npc.append(entityConfigClass(e, self.spritedir))
        if list_of_npc:
            return list_of_npc
        return None

    def __playerToList(self):
        """Build entity records for the 'player' group."""
        list_of_players = []
        for e in self.entities['player']:
            list_of_players.append(entityConfigClass(e, self.spritedir))
        if list_of_players:
            return list_of_players
        return None

    def __itemToList(self):
        """Build itemConfigClass records from the raw item entries."""
        list_of_item = []
        for w in self.items:
            list_of_item.append(itemConfigClass(w, self.spritedir, self.soundeffectdir))
        if list_of_item:
            return list_of_item
        return None

    def __equipToList(self, equip):
        """Build equipConfigClass records for the given equip group name."""
        list_of_equip = []
        for w in self.equips[equip]:
            list_of_equip.append(equipConfigClass(w, self.spritedir, self.soundeffectdir))
        if list_of_equip:
            return list_of_equip
        return None

    def __weaponToList(self, weapon):
        """Build weaponConfigClass records for the given equip group name."""
        list_of_weapons = []
        for w in self.equips[weapon]:
            list_of_weapons.append(weaponConfigClass(w, self.spritedir, self.soundeffectdir))
        if list_of_weapons:
            return list_of_weapons
        return None

    def __spriteToList(self, sprite):
        """Build spriteConfigClass records for the given sprite group name."""
        list_of_sprites = []
        for w in self.sprites[sprite]:
            list_of_sprites.append(spriteConfigClass(w, self.spritedir))
        if list_of_sprites:
            return list_of_sprites
        return None
    ################################# END To List ################################

    ################################# By ID ################################
    def itemByID(self, id):
        """Return the item config with the given id, or None."""
        for i in self.item_list:
            if i.id == id:
                return i
        return None

    def equipByID(self, id):
        """Return the equip config whose id matches, dispatching on the
        substring embedded in the id ('helmet', 'chest', ...), or None.
        """
        # NOTE(review): only helmet_list and weapons_list are created in
        # __init__; the chest/bottom/boots branches would raise
        # AttributeError if ever taken — confirm against the config schema.
        if id.count("helmet"):
            for w in self.helmet_list:
                if w.id == id:
                    return w
        elif id.count("chest"):
            for w in self.chest_list:
                if w.id == id:
                    return w
        elif id.count("bottom"):
            for w in self.bottom_list:
                if w.id == id:
                    return w
        elif id.count("boots"):
            for w in self.boots_list:
                if w.id == id:
                    return w
        elif id.count("weapon"):
            for w in self.weapons_list:
                if w.id == id:
                    return w
        return None

    def weaponByID(self, id):
        """Return the weapon config whose id matches, dispatching on the
        id substring ('nw', 'swd', 'bow', 'rod'), or None.
        """
        # NOTE(review): none of noweapon_list / sword_list / bow_list /
        # rod_list are created in __init__ (the sword/noweapon lines are
        # commented out there) — every branch here would raise
        # AttributeError. Confirm whether weapons_list should be used.
        if id.count('nw'):
            for w in self.noweapon_list:
                if w.id == id:
                    return w
        elif id.count('swd'):
            for w in self.sword_list:
                if w.id == id:
                    return w
        elif id.count('bow'):
            for w in self.bow_list:
                if w.id == id:
                    return w
        elif id.count('rod'):
            for w in self.rod_list:
                if w.id == id:
                    return w
        return None

    def entityByID(self, id):
        """Return the entity config with the given id, or None."""
        # NOTE(review): self.entity_list is never assigned in __init__
        # (__entityToList is never called) — this would raise
        # AttributeError at runtime.
        for e in self.entity_list:
            if e.id == id:
                return e
        return None

    def enemyByID(self, id):
        """Return the enemy config with the given id, or None."""
        for e in self.enemy_list:
            if e.id == id:
                return e
        return None

    def NPCByID(self, id):
        """Return the NPC config with the given id, or None."""
        for e in self.npc_list:
            if e.id == id:
                return e
        return None

    def playerByID(self, id):
        """Return the player config with the given id, or None."""
        for p in self.player_list:
            if p.id == id:
                return p
        return None

    def portalByID(self, id):
        """Return the portal config with the given id, or None."""
        for p in self.portal_list:
            if p.id == id:
                return p
        return None

    def spriteByID(self, id):
        """Return the sprite config whose id matches, dispatching on the
        id substring ('ent', 'eff', 'icon', 'other'), or None.
        """
        if id.count('ent'):
            for s in self.ent_sprite_list:
                if s.id == id:
                    return s
        elif id.count('eff'):
            for s in self.eff_sprite_list:
                if s.id == id:
                    return s
        elif id.count('icon'):
            for s in self.icon_sprite_list:
                if s.id == id:
                    return s
        elif id.count('other'):
            for s in self.other_sprite_list:
                if s.id == id:
                    return s
        return None

    def soundByID(self, id):
        """Load and return a pygame Sound for the given id, or None."""
        if self.sound_list.get(id):
            sound = self.soundeffectdir + self.sound_list.get(id)
            return pygame.mixer.Sound(sound)
        return None
    ################################# END By ID ################################
################################# END By ID ################################
class stageConfigClass(object):
    """Plain-data record describing one game stage from the config."""
    def __init__(self, dictStage, map_dir, soundtrack_dir):
        self.name = dictStage['name']
        self.id = dictStage['id']
        # Map file stored as a full path under the maps directory.
        self.mapfile = map_dir + dictStage['mapfile']
        self.type = dictStage['type']
        self.npcs = dictStage['npcs']
        #self.npc_spot = dictStage['npc_spot']
        self.enemies = dictStage['enemies']
        self.properties = dictStage['properties']
        # Soundtrack stored as a full path under the soundtrack directory.
        self.soundtrack = soundtrack_dir + dictStage['soundtrack']
        self.portals = dictStage['portals']
        self.startpoint = dictStage['startpoint']
        self.scriptID = dictStage['scriptID']
class entityConfigClass(object):
    """Plain-data record describing one entity (enemy/NPC/player)."""
    def __init__(self, dictEntity, spritedir):
        self.name = dictEntity['name']
        # Sprite kept as the raw config value (not prefixed with spritedir).
        #self.sprite = spritedir + dictEntity['sprite']
        self.sprite = dictEntity['sprite']
        self.id = dictEntity['id']
        self.type = dictEntity['type']
        # Bounding-box dimensions.
        self.dimensionx = dictEntity['dimensionx']
        self.dimensiony = dictEntity['dimensiony']
        self.weight = dictEntity['weight']
        self.stage = dictEntity['stage']
        #self.position = dictEntity['position']
        #self.spot = dictEntity['spot']
        self.dialogs = dictEntity['dialogs']
        self.properties = dictEntity['properties']
        self.status = dictEntity['status']
class portalConfigClass(object):
    """Plain-data record describing one stage portal from the config."""
    def __init__(self, dictPortal, spritedir):
        self.id = dictPortal['id']
        self.sprite = dictPortal['sprite']
        #x, y = dictPortal['position'][0], dictPortal['position'][1]
        #self.position = (x, y)
        # Position kept in whatever raw form the config provides.
        self.position = dictPortal['position']
        # Destination point and the stage it belongs to.
        self.destination = dictPortal['destination']
        self.dst_stage = dictPortal['dst_stage']
class itemConfigClass(object):
    """Plain-data record for one usable item defined in the config.

    `function` and `parameter` describe the effect the item triggers
    when used (see use()).
    """

    def __init__(self, dictItem, spritedir, soundeffectdir):
        self.name = dictItem['name']
        self.description = dictItem['description']
        self.id = dictItem['id']
        self.sprite = dictItem['sprite']
        self.function = dictItem['function']
        self.parameter = dictItem['parameter']

    def use(self, action=None):
        """Apply the item's effect by calling ``action(self.parameter)``.

        No-op when no callback is supplied. (Idiom fix: the original
        compared with ``!= None``; identity comparison ``is not None``
        is the correct Python idiom for a None check.)
        """
        if action is not None:
            action(self.parameter)
class equipConfigClass(object):
    """Plain-data record for one piece of equipment from the config.

    Copies the stat fields straight off the config dict; the sound file
    is stored as a full path under the sound-effect directory.
    """

    # Keys copied verbatim from the config entry, in config order.
    _PLAIN_KEYS = ('name', 'description', 'id', 'sprite',
                   'ad', 'ap', 'armor', 'mdef', 'movspeed', 'aspd')

    def __init__(self, dictEquip, spritedir, soundeffectdir):
        for key in self._PLAIN_KEYS:
            setattr(self, key, dictEquip[key])
        # Full path so it can be handed straight to the mixer.
        self.sound = soundeffectdir + dictEquip['sound']
class weaponConfigClass(object):
    """Plain-data record for one weapon from the config.

    Like equipConfigClass plus projectile/energy stats; the attack-range
    endpoints are coerced to int and the sound file is stored as a full
    path under the sound-effect directory.
    """

    # Keys copied verbatim from the config entry, in config order.
    _PLAIN_KEYS = ('name', 'description', 'id', 'sprite',
                   'ad', 'ap', 'armor', 'mdef', 'movspeed', 'aspd',
                   'bullet_speed', 'energycost')

    def __init__(self, dictWeapon, spritedir, soundeffectdir):
        for key in self._PLAIN_KEYS:
            setattr(self, key, dictWeapon[key])
        # Full path so it can be handed straight to the mixer.
        self.sound = soundeffectdir + dictWeapon['sound']
        # Range endpoints are ints so they can be used in arithmetic.
        self.s_range = int(dictWeapon['s_range'])
        self.e_range = int(dictWeapon['e_range'])
class spriteConfigClass(object):
    """Plain-data record for one sprite sheet from the config.

    `lin`/`col` give the sheet layout and `atk_cols` the columns used
    for attack animation frames; `file` is a full path under the sprite
    directory.
    """

    def __init__(self, dictSprite, spritedir):
        for key in ('name', 'id'):
            setattr(self, key, dictSprite[key])
        # Full path to the sprite-sheet image file.
        self.file = spritedir + dictSprite['file']
        for key in ('lin', 'col', 'atk_cols', 'sound'):
            setattr(self, key, dictSprite[key])
|
''' Running instructions:
python find_outliers.py CHICAGO_CRIME_DATA.csv NUM_DATA_POINTS_AFTER STOP_AFTER
'''
import cPickle as pickle
import lib
import numpy as np
import sys
import sklearn.ensemble
# CLI (Python 2 script): find_outliers.py DATA.csv NUM_DATA_POINTS_AFTER STOP_AFTER
num_after = int(sys.argv[2])
stop_after = int(sys.argv[3])

# Pre-trained artifacts produced elsewhere (loaded with cPickle).
classifier = pickle.load(open('classifier.pickle'))
histogram = pickle.load(open('histogram.pickle'))

i = 0
# NOTE(review): `file` and `id` below shadow Python builtins.
with open(sys.argv[1], 'rb') as file:
    for row in file:
        i += 1
        # Process only the window (num_after, num_after + stop_after].
        if i < num_after: continue
        if i > num_after+stop_after: break
        # Row layout: id, time-of-day, longitude, latitude, location, type.
        arr = row.split(' ')
        id = arr[0]
        tod = float(arr[1])
        longitude = float(arr[2])
        latitude = float(arr[3])
        loc = int(arr[4])
        typ = int(arr[5])
        query_data = np.matrix([tod, longitude, latitude, loc])
        # P(C|x): predicted probability of this row's crime type given features.
        PCs = classifier.predict_proba(query_data)
        PC = PCs[0, typ]
        # p(C): overall base-rate proportion of this type from the histogram.
        pC = histogram.get_prop(typ)
        # Relative surprise: positive when the type is more likely here
        # than its base rate; large negative values flag outliers.
        score = (PC - pC)/ pC
        print id, typ, score
|
import os, sys; sys.path.append(os.path.join("..", "..", ".."))
from pattern.table import Table
from pattern.table import uid, pprint
# The main purpose of the pattern module is to facilitate automated processes
# for (text) data acquisition and (linguistical) data mining.
# Often, this involves a tangle of messy text files and custom formats to store the data.
# The Table class offers a useful datasheet (cfr. MS Excel) in Python code.
# It can be saved as a CSV text file that is both human/machine readable.
# See also: examples/01-web/03-twitter.py
# Supported values that are imported and exported correctly:
# str, unicode, int, float, bool, None
# For other data types, custom encoder and decoder functions can be used.
# Build a small demo table; each row is [unique id, item name, category].
t = Table(rows=[
    [uid(), "broccoli", "vegetable"],
    [uid(), "turnip", "vegetable"],
    [uid(), "asparagus", "vegetable"],
    [uid(), "banana", "fruit" ],
])

# (Python 2 print statements.)
print t.rows[0]       # A list of rows.
print t.columns[1]    # A list of columns, where each column is a list of values.
print

# Columns can be manipulated directly like any other Python list.
# This can be slow for large tables. If you need a fast way to do matrix math,
# use numpy (http://numpy.scipy.org/) instead.
# The purpose of Table is data storage.
# Append a fourth column (one value per existing row, in row order).
t.columns.append([
    "green",
    "purple",
    "white",
    "yellow"
])

# Save as a comma-separated (unicode) text file.
t.save("food.txt")

# Load a table from file.
t = Table.load("food.txt")

pprint(t, truncate=50, padding=" ", fill=".")
|
## Constructing batches from the stream of data read
BATCH_SIZE = 64

def read_batch(stream, batch_size=BATCH_SIZE):
    """Yield lists of up to `batch_size` consecutive elements of `stream`.

    Every yielded batch is non-empty; only the final batch may be shorter
    than `batch_size`.

    Bug fix: the original unconditionally yielded the final batch, which
    produced a trailing empty list whenever len(stream) was an exact
    multiple of batch_size (including an empty stream). The remainder is
    now flushed only when non-empty.
    """
    batch = []
    for element in stream:
        batch.append(element)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:  # flush the non-empty remainder
        yield batch
import os
def process(split="dev"):
    """Rewrite the Librispeech ``<split>_asr.tsv`` manifest.

    Reads the source manifest, drops its header, rewrites each row's
    audio path to point at the appropriate local fbank80.zip archive,
    and writes a new 5-column TSV (id, audio, n_frames, tgt_text,
    speaker) under the small_external/ende/ASR directory.
    """
    # NOTE(review): `file` shadows the builtin name; paths are hard-coded
    # to this machine's data layout.
    file = os.path.join("/home/data_ti6_c/wangdq/ST/Librispeech/" + split)
    src_file = file + '_asr.tsv'
    head = ['id', 'audio', 'n_frames', 'tgt_text', 'speaker']
    content = []
    with open(src_file) as fsrc:
        for index, src in enumerate(fsrc.readlines()):
            # Skip the header row by position...
            if index == 0:
                continue
            # Source columns: id, audio, n_frames, <dropped>, text, speaker.
            sid, audio, n_frame, _, src, speaker = src.strip().split('\t')
            # ...and defensively by value as well.
            if sid == "id":
                continue
            # Re-root the audio archive path depending on the corpus.
            if "Librispeech" in audio:
                audio = "/home/data_ti6_c/wangdq/ST/Librispeech/fbank80.zip:" + ":".join(audio.split(":")[-2:])
            elif "mustc" in audio:
                audio = "/home/data_ti6_c/wangdq/ST/must-c/ende/fbank80.zip:" + ":".join(audio.split(":")[-2:])
            content.append("\t".join([sid, audio, n_frame, src, speaker]) + '\n')

    # Write header + rewritten rows to the output manifest.
    file = "/home/data_ti6_c/wangdq/ST/small_external/ende/ASR/" + split + '.tsv'
    with open(file, 'w') as fsrc:
        fsrc.write("\t".join(head) + '\n')
        fsrc.writelines(content)


if __name__ == '__main__':
    # process("dev")
    # process("test")
    process("train")
|
# -*- coding: utf-8 -*-
from zope.interface import Interface
class ILDAPPlugin(Interface):
    """Marker interface for the LDAP plugin.
    """


class ICacheSettingsRecordProvider(Interface):
    """Cache-settings provider; expected to return a record when called.

    In future this may be used more generically.
    """


# Identity sentinel returned by cache handlers when nothing is cached;
# a fresh dict() so it cannot collide with any real cached value.
VALUE_NOT_CACHED = dict()
class IPluginCacheHandler(Interface):
    """Handles caching of the node trees used in the PAS plugin.

    (zope.interface convention: method signatures omit ``self``.)
    """

    def get():
        """Return the cached value, or VALUE_NOT_CACHED when absent.
        """

    def set(value):
        """Store ``value`` in the cache.
        """

    def invalidate():
        """Remove the value from the cache.
        """
|
# Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import logging
import re
from datetime import datetime
from decimal import Decimal
from streamsets.sdk.utils import Version
from streamsets.testframework.markers import sdc_min_version
# Module-level logger at DEBUG so test diagnostics show up in CI output.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# pylint: disable=pointless-statement, too-many-locals
def test_field_flattener(sdc_builder, sdc_executor):
    """Test field flattener processor. The pipeline would look like:

        dev_raw_data_source >> field_flattener >> trash

    With given raw_data below, /contact/address will move to /newcontact/address and its elements will be
    flattened as home.state and home.zipcode.
    """
    name_separator = '.'
    raw_data = """
        {
          "contact": {
             "name": "Jane Smith",
             "id": "557",
             "address": {
               "home": {
                 "state": "NC",
                 "zipcode": "27023"
                  }
              }
          },
          "newcontact": {
            "address": {}
          }
        }
    """
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    field_flattener = pipeline_builder.add_stage('Field Flattener')
    # Flatten only /contact/address, into /newcontact/address, removing the
    # original field afterwards.
    field_flattener.set_attributes(fields=['/contact/address'], flatten_in_place=False,
                                   target_field='/newcontact/address', flatten='SPECIFIC_FIELDS',
                                   name_separator=name_separator, remove_flattened_field=True)
    trash = pipeline_builder.add_stage('Trash')

    dev_raw_data_source >> field_flattener >> trash
    pipeline = pipeline_builder.build('Field Flattener pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    new_value = snapshot[field_flattener.instance_name].output[0].field

    # assert remove_flattened_field took effect
    assert 'address' not in new_value['contact']
    # assert flattened target field uses name_separator
    assert f'home{name_separator}state' in new_value['newcontact']['address']
    assert f'home{name_separator}zipcode' in new_value['newcontact']['address']
def test_field_hasher(sdc_builder, sdc_executor):
    """Test field hasher. The pipeline would look like:

        dev_raw_data_source >> field_hasher >> trash

    With the given config below, we will have md5passcode, sha1passcode, sha2passcode and myrecord md5 holding for
    entire record. In versions >= 3.7.0, we'll have sha256passcode instead of sha2passcode, as well as sha512passcode.
    """
    raw_id = '557'
    raw_passcode = 'mysecretcode'
    raw_dict = dict(contact=dict(name='Jane Smith', id=raw_id, passcode=raw_passcode))
    raw_data = json.dumps(raw_dict)
    # Hash /contact/id in place with MD5.
    hash_in_place = [
        {
            'sourceFieldsToHash': ['/contact/id'],
            'hashType': 'MD5'
        }
    ]
    # Hash /contact/passcode into new fields and header attributes.
    hash_to_target = [
        {
            'sourceFieldsToHash': ['/contact/passcode'],
            'hashType': 'MD5',
            'targetField': '/md5passcode',
            'headerAttribute': 'md5passcode'
        }, {
            'sourceFieldsToHash': ['/contact/passcode'],
            'hashType': 'SHA1',
            'targetField': '/sha1passcode',
            'headerAttribute': 'sha1passcode'
        }
    ]
    # In 3.7.0, SHA2 was renamed to SHA256 (it will be automatically upgraded) and SHA512 was added
    # NOTE(review): the pre-3.7 check uses sdc_builder.version while the
    # >=3.7 check uses sdc_executor.version — confirm the mix is intended.
    if Version(sdc_builder.version) < Version('3.7.0'):
        hash_to_target.append(
            {
                'sourceFieldsToHash': ['/contact/passcode'],
                'hashType': 'SHA2',
                'targetField': '/sha2passcode',
                'headerAttribute': 'sha2passcode'
            }
        )
    if Version(sdc_executor.version) >= Version('3.7.0'):
        hash_to_target.append(
            {
                'sourceFieldsToHash': ['/contact/passcode'],
                'hashType': 'SHA256',
                'targetField': '/sha256passcode',
                'headerAttribute': 'sha256passcode'
            }
        )
        hash_to_target.append(
            {
                'sourceFieldsToHash': ['/contact/passcode'],
                'hashType': 'SHA512',
                'targetField': '/sha512passcode',
                'headerAttribute': 'sha512passcode'
            }
        )
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    field_hasher = pipeline_builder.add_stage('Field Hasher')
    # Also hash the entire record with MD5 into /myrecord + header 'myrecord'.
    field_hasher.set_attributes(hash_in_place=hash_in_place,
                                hash_to_target=hash_to_target,
                                hash_entire_record=True, hash_type='MD5',
                                header_attribute='myrecord', include_record_header=False,
                                target_field='/myrecord')
    trash = pipeline_builder.add_stage('Trash')

    dev_raw_data_source >> field_hasher >> trash
    pipeline = pipeline_builder.build('Field Hasher pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    new_header = snapshot[field_hasher.instance_name].output[0].header
    new_value = snapshot[field_hasher.instance_name].output[0].field

    # assert new header fields are created same as generated value fields
    assert new_header['values']['sha1passcode'] == new_value['sha1passcode'].value
    assert new_header['values']['myrecord'] == new_value['myrecord'].value
    assert new_header['values']['md5passcode'] == new_value['md5passcode'].value
    if Version(sdc_builder.version) < Version('3.7.0'):
        assert new_header['values']['sha2passcode'] == new_value['sha2passcode'].value
    if Version(sdc_executor.version) >= Version('3.7.0'):
        assert new_header['values']['sha256passcode'] == new_value['sha256passcode'].value
        assert new_header['values']['sha512passcode'] == new_value['sha512passcode'].value

    # assert in place record field being hashed as expected
    id_hash = hashlib.md5()
    id_hash.update(raw_id.encode())
    assert new_value['contact']['id'].value == id_hash.hexdigest()
    # assert new record field has an expected hash of MD5
    passcode_md5hash = hashlib.md5()
    passcode_md5hash.update(raw_passcode.encode())
    assert new_value['md5passcode'].value == passcode_md5hash.hexdigest()
    # assert new record field has an expected hash of SHA1
    passcode_sha1hash = hashlib.sha1()
    passcode_sha1hash.update(raw_passcode.encode())
    assert new_value['sha1passcode'].value == passcode_sha1hash.hexdigest()
    # assert new record field has an expected hash of SHA2 (SHA256 on >= 3.7.0)
    passcode_sha2hash = hashlib.sha256()
    passcode_sha2hash.update(raw_passcode.encode())
    if Version(sdc_builder.version) < Version('3.7.0'):
        assert new_value['sha2passcode'].value == passcode_sha2hash.hexdigest()
    if Version(sdc_executor.version) >= Version('3.7.0'):
        # assert new record field has an expected hash of SHA256
        assert new_value['sha256passcode'].value == passcode_sha2hash.hexdigest()
        # assert new record field has an expected hash of SHA512
        passcode_sha512hash = hashlib.sha512()
        passcode_sha512hash.update(raw_passcode.encode())
        assert new_value['sha512passcode'].value == passcode_sha512hash.hexdigest()
def test_field_masker(sdc_builder, sdc_executor):
    """Test field masker processor. The pipeline would look like:

        dev_raw_data_source >> field_masker >> trash

    With the given config below, `donKey` will be masked as `xxxxxxxxxx` (for fixed) and `xxxxxx` (for variable),
    `617-567-8888` will be masked as `617-xxx-xxxx`, `94086-6161` will be masked as `940xx`, `30529 - 123-45-6789`
    will be masked as `30529xxx123xxxxxxxx` (regex to mask all except groups 1 and 2).
    """
    raw_dict = dict(fixed_passwd='donKey', variable_passwd='donKey', custom_ph='617-567-8888',
                    custom_zip='94086-6161', social='30529 - 123-45-6789')
    raw_data = json.dumps(raw_dict)
    # One mask config per masking mode being exercised.
    field_mask_configs = [
        {
            'fields': ['/fixed_passwd'],
            'maskType': 'FIXED_LENGTH',
            'regex': '(.*)',
            'groupsToShow': '1'
        }, {
            'fields': ['/variable_passwd'],
            'maskType': 'VARIABLE_LENGTH',
            'regex': '(.*)',
            'groupsToShow': '1'
        }, {
            'fields': ['/custom_ph'],
            'maskType': 'CUSTOM',
            'regex': '(.*)',
            'groupsToShow': '1',
            'mask': '###-xxx-xxxx'
        }, {
            'fields': ['/custom_zip'],
            'maskType': 'CUSTOM',
            'regex': '(.*)',
            'groupsToShow': '1',
            'mask': '###xx'
        }, {
            'fields': ['/social'],
            'maskType': 'REGEX',
            'regex': '([0-9]{5}) - ([0-9]{3})-([0-9]{2})-([0-9]{4})',
            'groupsToShow': '1,2'
        }
    ]
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    field_masker = pipeline_builder.add_stage('Field Masker')
    field_masker.set_attributes(field_mask_configs=field_mask_configs)
    trash = pipeline_builder.add_stage('Trash')

    dev_raw_data_source >> field_masker >> trash
    pipeline = pipeline_builder.build('Field Masker pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    new_value = snapshot[field_masker.instance_name].output[0].field

    # assert fixed length mask always has the same masked characters
    fixed_value = new_value['fixed_passwd'].value
    assert fixed_value == len(fixed_value) * fixed_value[0]
    # assert variable length mask keeps the original length of the value
    variable_value = new_value['variable_passwd'].value
    assert variable_value == len(raw_dict['variable_passwd']) * variable_value[0]
    # assert custom mask works ('#' shows the character, 'x' masks it)
    assert new_value['custom_ph'].value == '{}-xxx-xxxx'.format(raw_dict['custom_ph'][0:3])
    # assert length of custom mask is the mask pattern length. Mask here is '###xx' and hence length of 5
    custom_zip = new_value['custom_zip'].value
    assert len(custom_zip) == 5 and custom_zip == '{}xx'.format(raw_dict['custom_zip'][0:3])
    # assert regular expression mask shows only groups 1 and 2
    match = re.search('([0-9]{5}) - ([0-9]{3})-([0-9]{2})-([0-9]{4})', raw_dict['social'])
    assert new_value['social'].value == '{}xxx{}xxxxxxxx'.format(match.group(1), match.group(2))
def test_field_merger(sdc_builder, sdc_executor):
    """Test field merger processor. The pipeline would look like:

        dev_raw_data_source >> field_merger >> trash

    With the given config below, /contact will move to existing path /dupecontact and /identity will move to new path
    /uniqueid.
    """
    raw_data = """
        {
          "contact": {
            "city": "San Juan"
          },
          "dupecontact": {},
          "identity": {
             "name": "Jane Smith",
             "id": "557"
          }
        }
    """
    raw_dict = json.loads(raw_data)
    # Merge /identity into a new path, /contact into an existing one.
    fields_to_merge = [
        {
            'fromField': '/identity',
            'toField': '/uniqueid'
        },
        {
            'fromField': '/contact',
            'toField': '/dupecontact'
        }
    ]
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    field_merger = pipeline_builder.add_stage('Field Merger')
    field_merger.set_attributes(fields_to_merge=fields_to_merge, overwrite_fields=True)
    trash = pipeline_builder.add_stage('Trash')

    dev_raw_data_source >> field_merger >> trash
    pipeline = pipeline_builder.build('Field Merger pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    new_value = snapshot[field_merger.instance_name].output[0].field

    # assert overwrite existing works
    assert len(new_value['dupecontact']) > 0
    # assert merge works by comparing `identity` to new `uniqueid` dict
    dict1 = raw_dict['identity']
    dict2 = new_value['uniqueid']
    assert len(dict1) == len(dict2)
    assert all(source_value == dict2[source_key].value for source_key, source_value in dict1.items())
def test_field_order(sdc_builder, sdc_executor):
    """Test field order processor. The pipeline would look like:

        dev_raw_data_source >> field_order >> trash

    With given config, 3 ordered_fields will be created by dropping all the rest. /address/home/country is a new
    path being created with the default value (e.g. 'USA').
    """
    raw_data = """
        {
          "name": "Jane Smith",
          "id": "557",
          "address": {
            "home": {
              "state": "NC",
              "zipcode": "27023"
            }
          }
        }
    """
    extra_field = '/address/home/country'
    default_value = 'USA'
    fields_to_order = ['/address/home/zipcode', '/address/home/state', extra_field]
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    field_order = pipeline_builder.add_stage('Field Order')
    # Keep only fields_to_order (DISCARD the rest); missing fields get the
    # STRING default_value; output the record as an ordered LIST_MAP.
    field_order.set_attributes(extra_fields='DISCARD', fields_to_order=fields_to_order,
                               missing_fields='USE_DEFAULT', default_type='STRING',
                               default_value=default_value, output_type='LIST_MAP')
    trash = pipeline_builder.add_stage('Trash')

    dev_raw_data_source >> field_order >> trash
    pipeline = pipeline_builder.build('Field Order pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    new_value = snapshot[field_order.instance_name].output[0].field

    # assert we got ordered fields and we don't have discarded fields
    # (output keys look like 'address.home.zipcode'; normalize to '/a/b/c')
    assert fields_to_order == ['/{}'.format(i.replace('"', '').replace('.', '/')) for i in new_value]
    # assert we got extra field value as expected
    extra_fields = [i for i in new_value if '/{}'.format(i.replace('"', '').replace('.', '/')) == extra_field]
    assert new_value[extra_fields[0]] == default_value
def test_field_pivoter(sdc_builder, sdc_executor):
    """Test field pivoter processor. The pipeline would look like:

        dev_raw_data_source >> field_pivoter >> trash

    With the given config, data will pivot at field_to_pivot (/ballpoint/color_list). In this case by creating
    3 records (one for each of ['black', 'blue', 'red']) for one input record.
    """
    raw_dict = dict(ballpoint=dict(color_list=['black', 'blue', 'red'], unit_cost='.10'))
    raw_data = json.dumps(raw_dict)
    field_to_pivot = '/ballpoint/color_list'
    field_name_path = '/ballpoint/color_list_path'
    pivoted_item_path = '/ballpoint/color'
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    field_pivoter = pipeline_builder.add_stage('Field Pivoter')
    # Copy the non-pivoted fields into every output record and remember the
    # original field name at field_name_path.
    field_pivoter.set_attributes(copy_all_fields=True, field_to_pivot=field_to_pivot,
                                 original_field_name_path=field_name_path, pivoted_items_path=pivoted_item_path,
                                 save_original_field_name=True)
    trash = pipeline_builder.add_stage('Trash')

    dev_raw_data_source >> field_pivoter >> trash
    pipeline = pipeline_builder.build('Field Pivoter pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    new_value = snapshot[field_pivoter.instance_name].output[0].field

    # assert our record got pivoted into expected length (one output record
    # per element of the pivoted list)
    assert len(raw_dict['ballpoint']['color_list']) == len(snapshot[field_pivoter.instance_name].output)
    # assert pivoted field name is stored in the expected path
    assert new_value['ballpoint']['color_list_path'].value == field_to_pivot
def test_field_remover(sdc_builder, sdc_executor):
    """Test field remover processor for three different actions. The pipeline would look like:

        dev_raw_data_source >> field_remover1 >> field_remover2 >> field_remover3 >> trash

    With given 3 different field remover configs, the field_remover1 will remove /name and /id paths. field_remover2
    will remove any input fields which are null - in this case /checknull1 and /checknull2. field_remover3 will remove
    all fields except the mentioned ones - in this case except /address/home/zipcode and /address/home/country
    """
    raw_data = """
        {
          "name": "Jane Smith",
          "id": "557",
          "checknull1": null,
          "checknull2": null,
          "address": {
            "home": {
              "state": "NC",
              "zipcode": "27023"
            }
          }
        }
    """
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    # One remover per action under test: REMOVE, REMOVE_NULL, KEEP.
    field_remover1 = pipeline_builder.add_stage('Field Remover')
    field_remover1.set_attributes(fields=['/id', '/name'], action='REMOVE')
    field_remover2 = pipeline_builder.add_stage('Field Remover')
    field_remover2.set_attributes(fields=['/checknull1', '/checknull2'], action='REMOVE_NULL')
    field_remover3 = pipeline_builder.add_stage('Field Remover')
    field_remover3.set_attributes(fields=['/address/home/zipcode', '/address/home/country'], action='KEEP')
    trash = pipeline_builder.add_stage('Trash')

    dev_raw_data_source >> field_remover1 >> field_remover2 >> field_remover3 >> trash
    pipeline = pipeline_builder.build('Field Remover pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    remover1_value = snapshot[field_remover1.instance_name].output[0].field
    remover2_value = snapshot[field_remover2.instance_name].output[0].field
    remover3_value = snapshot[field_remover3.instance_name].output[0].field

    # assert remove listed fields action
    assert 'name' not in remover1_value and 'id' not in remover1_value
    # assert remove listed fields if their values are null action
    assert 'checknull1' not in remover2_value and 'checknull2' not in remover2_value
    # assert keep only the listed fields action
    assert len(remover3_value) == 1 and 'state' not in remover3_value['address']['home']
def test_field_renamer(sdc_builder, sdc_executor):
    """Test field renamer processor. The pipeline would look like:

        dev_raw_data_source >> field_renamer >> trash

    With the given config below, based on regex, the incoming fields OPS_name1 and OPS_name2 will be renamed to
    name1 and name2 respectively.
    """
    strip_word = 'OPS_'
    raw_dict = dict(OPS_name1='abc1', OPS_name2='abc2')
    raw_data = json.dumps(raw_dict)

    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    field_renamer = pipeline_builder.add_stage('Field Renamer')
    # Capture the text around the prefix and reassemble the path without it.
    field_renamer.fields_to_rename = [{'fromFieldExpression': f'(.*){strip_word}(.*)',
                                       'toFieldExpression': '$1$2'}]
    trash = pipeline_builder.add_stage('Trash')
    dev_raw_data_source >> field_renamer >> trash
    pipeline = pipeline_builder.build('Field Renamer pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    new_value = snapshot[field_renamer.instance_name].output[0].field
    for key in raw_dict:
        # Bug fix: key.strip(strip_word) removes the *character set* {'O','P','S','_'}
        # from both ends of the string, not the literal prefix; slice the prefix
        # length off instead to get the expected renamed key.
        assert key not in new_value and key[len(strip_word):] in new_value
@sdc_min_version('3.7.0')
def test_field_renamer_uppercasing(sdc_builder, sdc_executor):
    """Test uppercasing of all fields - a common action done with the renamer."""
    input_record = dict(first_key='IPO', second_key='StreamSets')

    builder = sdc_builder.get_pipeline_builder()
    source = builder.add_stage('Dev Raw Data Source')
    source.set_attributes(data_format='JSON', raw_data=json.dumps(input_record))

    renamer = builder.add_stage('Field Renamer')
    # Match every top-level field and rewrite its name through str:toUpper().
    renamer.fields_to_rename = [{'fromFieldExpression': '/(.*)',
                                 'toFieldExpression': '/${str:toUpper("$1")}'}]

    trash = builder.add_stage('Trash')
    source >> renamer >> trash

    pipeline = builder.build('Field Renamer pipeline: Upper casing of fields')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)

    output_record = snapshot[renamer.instance_name].output[0]
    assert output_record.field['FIRST_KEY'] == 'IPO'
    assert output_record.field['SECOND_KEY'] == 'StreamSets'
@sdc_min_version('3.1.0.0')
def test_field_replacer(sdc_builder, sdc_executor):
    """Test field replacer processor. The pipeline would look like:

        dev_raw_data_source >> field_replacer >> trash

    Replacement rules under test: mask the ssn field with 'XXX-XX-XXXX', null out
    the ranking field unconditionally, and null out any field whose value is
    'NA' or 'not_available'.
    """
    winners = [dict(ssn='111-11-1111', year='2010', ranking='3', statistics='NA'),
               dict(ssn='111-22-1111', year='2011', ranking='2', statistics='2-3-3'),
               dict(ssn='111-33-1111', year='2012', ranking='1', statistics='not_available')]
    raw_data = ''.join(json.dumps(winner) for winner in winners)

    builder = sdc_builder.get_pipeline_builder()
    origin = builder.add_stage('Dev Raw Data Source')
    origin.set_attributes(data_format='JSON', raw_data=raw_data)

    field_replacer = builder.add_stage('Field Replacer')
    field_replacer.replacement_rules = [
        {'setToNull': False, 'fields': '/ssn', 'replacement': 'XXX-XX-XXXX'},
        {'setToNull': True, 'fields': '/ranking'},
        # field-path EL: matches any field whose current value is one of the markers
        {'setToNull': True,
         'fields': '/*[${f:value() == "NA" || f:value() == "not_available"}]'},
    ]
    trash = builder.add_stage('Trash')
    origin >> field_replacer >> trash
    pipeline = builder.build('Field Replacer pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)

    # Build the expected records without mutating the input fixtures.
    expected = [dict(rec,
                     ssn='XXX-XX-XXXX',
                     ranking=None,
                     statistics=None if rec['statistics'] in ('NA', 'not_available')
                     else rec['statistics'])
                for rec in winners]
    actual = [rec.field for rec in snapshot[field_replacer.instance_name].output]
    assert actual == expected
def test_field_splitter(sdc_builder, sdc_executor):
    """Test field splitter processor. The pipeline would look like:

        dev_raw_data_source >> field_splitter >> trash

    With given config to process 2 records, the first record's /error/text value will split into /error/code and
    /error/message based on separator (,). The second record's /error/text value will split similarly but since it has
    too many split, the extra splits will go into new field called /error/etcMessages.
    """
    raw_data = """
        [ { "error": {
                "text": "GM-302,information that you might need"
            }
          },
          { "error": {
                "text": "ME-3042,message about error,additional information from server,network error,driver error"
            }
          }
        ]
    """
    raw_list = json.loads(raw_data)
    separator = ','
    split_fields = ['/error/code', '/error/message']
    source_sub_field = 'text'
    etc_sub_field = 'etcMessages'

    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', json_content='ARRAY_OBJECTS', raw_data=raw_data)
    field_splitter = pipeline_builder.add_stage('Field Splitter')
    # too_many_splits=TO_LIST sends overflow splits to /error/etcMessages;
    # original_field=REMOVE drops /error/text after splitting.
    field_splitter.set_attributes(field_for_remaining_splits=f'/error/{etc_sub_field}',
                                  field_to_split=f'/error/{source_sub_field}', new_split_fields=split_fields,
                                  not_enough_splits='CONTINUE', original_field='REMOVE',
                                  separator=separator, too_many_splits='TO_LIST')
    trash = pipeline_builder.add_stage('Trash')
    dev_raw_data_source >> field_splitter >> trash
    pipeline = pipeline_builder.build('Field Splitter pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    record_1 = snapshot[field_splitter.instance_name].output[0].field['error']
    record_2 = snapshot[field_splitter.instance_name].output[1].field['error']
    # assert we got expected number of splits (record 1 has exactly 2 pieces,
    # matching the 2 configured split fields)
    assert len(raw_list[0]['error']['text'].split(separator)) == len(record_1)
    # assert record data: every split field holds one of the raw comma-separated pieces
    raw_record_data = raw_list[0]['error']['text'].split(separator)
    for value in record_1.values():
        assert value.value in raw_record_data
    # assert field_for_remaining_splits
    raw_record_data = raw_list[1]['error']['text'].split(separator)
    # etc_sub_field will only have a subset of splits and hence need to take out (subtract) the remaining
    assert len(record_2[etc_sub_field]) == len(raw_record_data) - len(split_fields)
    for data in record_2[etc_sub_field]:
        assert data.value in raw_record_data
    # assert original_field being removed
    assert source_sub_field not in record_1 and source_sub_field not in record_2
def test_field_type_converter(sdc_builder, sdc_executor):
    """Test field type converter processor. We will use two stages to test field by field type conversion and
    data type conversion. The pipeline would look like:

        dev_raw_data_source >> field_type_converter_fields >> field_type_converter_types >> trash

    With given 2 stages for converter, field_type_converter_fields will convert field for field conversion. For e.g.,
    record1's /amInteger will convert from INTEGER to BYTE. field_type_converter_types will convert any field type to
    any field type if it matches the type criteria. For e.g., /amDateTime field type DATETIME will convert to LONG.
    """
    utc_datetime_str = '1978-01-05 19:38:01'
    utc_datetime = datetime.strptime(utc_datetime_str, '%Y-%m-%d %H:%M:%S')
    # NOTE(review): '%s' (epoch seconds) is a platform-dependent strftime directive
    # and interprets the naive datetime in the host's local zone — assumes the test
    # host runs in UTC; confirm before relying on this on other platforms.
    utc_datetime_in_int = int(utc_datetime.strftime('%s')) * 1000  # multiply by 1000 to account for milliseconds
    raw_str_value = 'hello again!'
    # note, date time here is in UTC. Each map is an SDC record to process.
    raw_col = [{'amInteger': 123}, {'amDouble': 12345.6789115}, {'amString': 'hello'}, {'amBool': True},
               {'amDateTime': utc_datetime_str}, {'amString2': raw_str_value}, {'amZonedDateTime': None}]
    raw_data = json.dumps(raw_col)
    # BY_FIELD rules: convert each named field path to the given target type.
    field_type_converter_configs = [
        {
            'fields': ['/amInteger'],
            'targetType': 'BYTE',
            'dataLocale': 'en,US'
        }, {
            'fields': ['/amDouble'],
            'targetType': 'INTEGER',
            'dataLocale': 'en,US'
        }, {
            'fields': ['/amString'],
            'targetType': 'CHAR'
        }, {
            'fields': ['/amBool'],
            'targetType': 'BOOLEAN'
        }, {
            'fields': ['/amDateTime'],
            'targetType': 'DATETIME',
            'dateFormat': 'YYYY_MM_DD_HH_MM_SS'
        }, {
            'fields': ['/amString2'],
            'targetType': 'BYTE_ARRAY'
        }, {
            'fields': ['/amZonedDateTime'],
            'targetType': 'ZONED_DATETIME'
        }
    ]
    # BY_TYPE rules: convert every field whose current type matches sourceType.
    # Each entry below undoes/extends one of the BY_FIELD conversions above.
    whole_type_converter_configs = [
        {
            'sourceType': 'BYTE',
            'targetType': 'DECIMAL',
            'dataLocale': 'en,US',
            'scale': -1,
            'decimalScaleRoundingStrategy': 'ROUND_UNNECESSARY'
        }, {
            'sourceType': 'INTEGER',
            'targetType': 'SHORT',
            'dataLocale': 'en,US'
        }, {
            'sourceType': 'CHAR',
            'targetType': 'STRING',
            'treatInputFieldAsDate': False
        }, {
            'sourceType': 'BOOLEAN',
            'targetType': 'STRING',
            'treatInputFieldAsDate': False,
            'encoding': 'UTF-8'
        }, {
            'sourceType': 'DATETIME',
            'targetType': 'LONG',
            'treatInputFieldAsDate': True,
            'dateFormat': 'YYYY_MM_DD_HH_MM_SS',
            'encoding': 'UTF-8',
            'dataLocale': 'en,US'
        }, {
            'sourceType': 'BYTE_ARRAY',
            'targetType': 'STRING',
            'treatInputFieldAsDate': False,
            'encoding': 'UTF-8'
        }, {
            'sourceType': 'ZONED_DATETIME',
            'targetType': 'STRING',
            'encoding': 'UTF-8'
        }
    ]

    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', json_content='ARRAY_OBJECTS', raw_data=raw_data)
    field_type_converter_fields = pipeline_builder.add_stage('Field Type Converter')
    field_type_converter_fields.set_attributes(conversion_method='BY_FIELD',
                                               field_type_converter_configs=field_type_converter_configs)
    field_type_converter_types = pipeline_builder.add_stage('Field Type Converter')
    field_type_converter_types.set_attributes(conversion_method='BY_TYPE',
                                              whole_type_converter_configs=whole_type_converter_configs)
    trash = pipeline_builder.add_stage('Trash')
    dev_raw_data_source >> field_type_converter_fields >> field_type_converter_types >> trash
    pipeline = pipeline_builder.build('Field Type Converter pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    # assert field by field type conversion (one record per raw_col entry, in order)
    field_output = snapshot[field_type_converter_fields.instance_name].output
    assert field_output[0].field['amInteger'].type == 'BYTE'
    assert field_output[1].field['amDouble'].type == 'INTEGER'
    assert field_output[2].field['amString'].type == 'CHAR'
    assert field_output[3].field['amBool'].type == 'BOOLEAN'
    assert field_output[4].field['amDateTime'].type == 'DATETIME'
    assert field_output[5].field['amString2'].type == 'BYTE_ARRAY'
    assert field_output[6].field['amZonedDateTime'].type == 'ZONED_DATETIME'
    # assert data type conversion
    type_output = snapshot[field_type_converter_types.instance_name].output
    assert type_output[0].field['amInteger'].type == 'DECIMAL'
    assert type_output[1].field['amDouble'].type == 'SHORT'
    assert type_output[2].field['amString'].type == 'STRING'
    assert type_output[3].field['amBool'].type == 'STRING'
    assert type_output[4].field['amDateTime'].type == 'LONG'
    assert type_output[5].field['amString2'].type == 'STRING'
    assert type_output[6].field['amZonedDateTime'].type == 'STRING'
    # assert values which can be compared (round-trip through the two conversions)
    assert utc_datetime_in_int == int(type_output[4].field['amDateTime'].value)
    assert raw_str_value == type_output[5].field['amString2'].value
def test_field_type_converter_long_decimals(sdc_builder, sdc_executor):
    """Regression test for SDC-10949: high-precision decimals survive conversion.

    Builds dev_raw_data_source >> field_type_converter >> trash. The raw data
    carries a decimal (in STRING form) with more precision than a double can
    hold; the converter turns it into DECIMAL and we assert that every digit
    was preserved in the process.
    """
    decimal_str_val = '11235813213455.55342113853211'
    raw_data = json.dumps([{'largeDecimal': decimal_str_val}])
    # scale=-1 keeps the source scale; ROUND_UNNECESSARY turns any forced
    # rounding into an error, so silent precision loss cannot pass.
    field_type_converter_configs = [
        {
            'fields': ['/largeDecimal'],
            'targetType': 'DECIMAL',
            'dataLocale': 'en,US',
            'scale': -1,
            'decimalScaleRoundingStrategy': 'ROUND_UNNECESSARY'
        }
    ]

    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', json_content='ARRAY_OBJECTS', raw_data=raw_data)
    field_type_converter_fields = pipeline_builder.add_stage('Field Type Converter')
    field_type_converter_fields.set_attributes(conversion_method='BY_FIELD',
                                               field_type_converter_configs=field_type_converter_configs)
    trash = pipeline_builder.add_stage('Trash')
    dev_raw_data_source >> field_type_converter_fields >> trash
    pipeline = pipeline_builder.build('Field Type Converter large decimal pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    # assert field coming out of origin is STRING (sanity check)
    raw_output = snapshot[dev_raw_data_source.instance_name].output
    assert raw_output[0].field['largeDecimal'].type == 'STRING'
    # assertions on field coming out of field type converter: type converted
    # and value preserved digit-for-digit
    field_output = snapshot[field_type_converter_fields.instance_name].output
    assert field_output[0].field['largeDecimal'].type == 'DECIMAL'
    assert field_output[0].field['largeDecimal'].value == Decimal(decimal_str_val)
# SDC-11561: File Type Converter doesn't work properly with null in MAP and LIST types
@sdc_min_version('3.9.0')  # For the JavaScript processor use
def test_field_type_converter_null_map(sdc_builder, sdc_executor):
    """Make sure that the origin doesn't fail (does a no-op) on a map that is null."""
    builder = sdc_builder.get_pipeline_builder()

    origin = builder.add_stage('Dev Raw Data Source')
    origin.data_format = 'TEXT'
    origin.raw_data = 'Does not matter'
    origin.stop_after_first_batch = True

    # The script replaces each record's root with a MAP field whose value is null.
    javascript = builder.add_stage('JavaScript Evaluator')
    javascript.script_record_type = 'SDC_RECORDS'
    javascript.init_script = ''
    javascript.destroy_script = ''
    javascript.script = """
        var Field = Java.type('com.streamsets.pipeline.api.Field');
        for (var i = 0; i < records.length; i++) {
            records[i].sdcRecord.set(Field.create(Field.Type.MAP, null));
            output.write(records[i]);
        }
    """

    # The converter targets a path that cannot exist inside the null map;
    # the expectation is a no-op rather than a pipeline failure.
    converter = builder.add_stage('Field Type Converter')
    converter.conversion_method = 'BY_FIELD'
    converter.field_type_converter_configs = [{
        'fields': ['/somethingSomewhere'],
        'targetType': 'DECIMAL',
        'dataLocale': 'en,US',
        'scale': -1,
        'decimalScaleRoundingStrategy': 'ROUND_UNNECESSARY'
    }]

    trash = builder.add_stage('Trash')
    origin >> javascript >> converter >> trash

    pipeline = builder.build()
    sdc_executor.add_pipeline(pipeline)
    sdc_executor.start_pipeline(pipeline).wait_for_finished()

    # No exceptions, so the pipeline should be in finished state
    status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
    assert status == 'FINISHED'

    # The single input record must also make it all the way through.
    history = sdc_executor.get_pipeline_history(pipeline)
    assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
    assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 1
def test_field_zip(sdc_builder, sdc_executor):
    """Test field zip processor. The pipeline would look like:

        dev_raw_data_source >> field_zip >> trash

    With given config, /basics and /additional will zip to /all path. For e.g., /all will have be a list of
    2 zipped maps of {id: 23, inventory:80}, {color: 20005, cost:5} similarly for /itemID and /cost.
    """
    raw_data = """
        [
          {
            "order": 23523482,
            "itemID": [2, 113, 954, 6502],
            "cost": [89.95, 8.95],
            "basics": [{"id": 23, "color": 20005}],
            "additional": [{"inventory": 80, "cost": 5}]
          },
          {
            "order": 23523481,
            "basics": [{"id": 23, "color": 20005}],
            "additional": [{"inventory": 80, "cost": 5}]
          }
        ]
    """
    raw_list = json.loads(raw_data)
    result_key_1 = 'purchase'
    result_key_2 = 'all'
    # Two zip rules: /itemID + /cost -> /purchase, and /basics + /additional -> /all.
    fields_to_zip_configs = [
        {
            'zippedFieldPath': f'/{result_key_1}',
            'firstField': '/itemID',
            'secondField': '/cost'
        }, {
            'zippedFieldPath': f'/{result_key_2}',
            'firstField': '/basics',
            'secondField': '/additional'
        }
    ]

    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', json_content='ARRAY_OBJECTS', raw_data=raw_data)
    field_zip = pipeline_builder.add_stage('Field Zip')
    # CONTINUE lets record 2 (which has no /itemID or /cost) pass through unzipped.
    field_zip.set_attributes(field_does_not_exist='CONTINUE', fields_to_zip=fields_to_zip_configs,
                             zip_values_only=False)
    trash = pipeline_builder.add_stage('Trash')
    dev_raw_data_source >> field_zip >> trash
    pipeline = pipeline_builder.build('Field Zip pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    record_result = snapshot[field_zip.instance_name].output[0].field
    # assert we got expected number of merge fields (originals plus one per zip rule)
    assert len(raw_list[0]) + len(fields_to_zip_configs) == len(record_result)
    # assert data is merged as expected: the zipped pairs equal Python's zip()
    # of the two raw lists (truncated to the shorter list)
    raw_merge = list(zip(raw_list[0]['itemID'], raw_list[0]['cost']))
    record_field_result = record_result[result_key_1]
    record_field_merge = [tuple(float(b.value) for b in a.values()) for a in record_field_result]
    assert raw_merge == record_field_merge
    # assert the missing record fields do not merge anything
    assert result_key_1 not in snapshot[field_zip.instance_name].output[1].field
    assert result_key_2 not in snapshot[field_zip.instance_name].output[1].field
def test_value_replacer(sdc_builder, sdc_executor):
    """Test Value Replacer processor replacing values in fields. The pipeline would look like:

        dev_raw_data_source >> value_replacer >> trash
    """
    expected_password_value = 'mysecretcode'
    expected_state_value = 'NC'
    raw_data = """
        {
          "contact": {
              "fname": "Jane",
              "lname": "Smith",
              "id": 557,
              "address": {
                  "home": {
                      "state": "North Carolina",
                      "zipcode": "27023"
                  }
              },
              "password": null,
              "state": null
          }
        }
    """
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    value_replacer = pipeline_builder.add_stage('Value Replacer', type='processor')
    # Three kinds of replacement exercised at once:
    #   1. conditional: fields equal to 'North Carolina' become 'NC' (operator=ALL
    #      requires all listed fields to match; /contact/state is null, so only
    #      /contact/address/home/state matches the comparison value)
    #   2. null replacement: null /contact/password becomes the expected password
    #   3. null-out: /contact/fname and /contact/lname (wildcard *name) are nulled
    #      when the record's id is positive
    value_replacer.set_attributes(conditionally_replace_values=[{
        'fieldNames': ['/contact/address/home/state', '/contact/state'],
        'operator': 'ALL',
        'comparisonValue': 'North Carolina',
        'replacementValue': expected_state_value
    }], replace_null_values=[{
        'fields': ['/contact/password'],
        'newValue': expected_password_value
    }], fields_to_null=[{
        'fieldsToNull': ['/contact/*name'],
        'condition': "${record:value('/contact/id') > 0}"
    }])
    trash = pipeline_builder.add_stage('Trash')
    dev_raw_data_source >> value_replacer >> trash
    pipeline = pipeline_builder.build('Value Replacer pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    new_value = snapshot[value_replacer.instance_name].output[0].field['contact']
    # assert fields to null
    assert new_value['fname'].value is new_value['lname'].value is None
    # assert replace null values
    assert expected_password_value == new_value['password'].value
    # assert conditionally replace values
    assert expected_state_value == new_value['state'].value
    assert expected_state_value == new_value['address']['home']['state'].value
@sdc_min_version('3.8.0')
def test_field_mapper_min_max(sdc_builder, sdc_executor):
    """
    Test the Field Mapper processor, by path, with an aggregate expression and preserving original paths.
    This pipeline calculates the minimum and maximum value of integer fields whose name contains the word "value",
    writing the results to /outputs/min and /outputs/max while leaving the source fields in place.
    The pipeline that will be constructed is:

        dev_raw_data_source (JSON data) >> field_mapper (max value) >> field_mapper (min value) >> trash
    """
    raw_data = """{
        "sensor_id": "abc123",
        "sensor_readings": [
            {
                "reading_id": "def456",
                "value": 87
            },{
                "reading_id": "ghi789",
                "values": [-5, 17, 19]
            },{
                "reading_id": "jkl012",
                "values": [99, -107, 50]
            }
        ],
        "outputs": {}
    }"""
    builder = sdc_builder.get_pipeline_builder()
    source = builder.add_stage('Dev Raw Data Source')
    source.set_attributes(data_format='JSON', raw_data=raw_data)

    # The two mapper stages are identical except for the aggregate function and
    # its output path, so configure them in a loop.
    mappers = {}
    for aggregate in ('max', 'min'):
        stage = builder.add_stage('Field Mapper', type='processor')
        stage.set_attributes(
            operate_on='FIELD_PATHS',
            conditional_expression="${f:type() == 'INTEGER' and str:startsWith(f:name(), 'value')}",
            mapping_expression=f'/outputs/{aggregate}',
            aggregation_expression='${%s(fields)}' % aggregate,
            maintain_original_paths=True,
        )
        mappers[aggregate] = stage

    trash = builder.add_stage('Trash')
    source >> mappers['max'] >> mappers['min'] >> trash
    pipeline = builder.build('Field mapper - sensor reading value min and max pipeline')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    sdc_executor.remove_pipeline(pipeline)

    max_output = snapshot[mappers['max'].instance_name].output
    assert max_output[0].get_field_data('/outputs/max').type == 'LONG'
    assert max_output[0].get_field_data('/outputs/max').value == 99
    # ensure original field was left in place
    assert max_output[0].get_field_data('/sensor_readings[2]/values[0]').value == 99

    min_output = snapshot[mappers['min'].instance_name].output
    assert min_output[0].get_field_data('/outputs/min').type == 'LONG'
    assert min_output[0].get_field_data('/outputs/min').value == -107
    # ensure original field was left in place
    assert min_output[0].get_field_data('/sensor_readings[2]/values[1]').value == -107
@sdc_min_version('3.8.0')
def test_field_mapper_gather_paths_with_predicate(sdc_builder, sdc_executor):
    """
    Test the Field Mapper processor, by path, with an aggregate expression (using previousPath and value to construct a
    new map), and preserving original paths.
    This pipeline attempts to find all occurences of String field values, or field names, containing "dave" and captures
    their original path in the record along with the original field value
    The pipeline that will be constructed is:

        dev_raw_data_source (JSON data) >> field_mapper (find Daves) >> trash
    """
    raw_data = """{
        "first": {
            "firstSub1": {
                "foo": 1,
                "bar": 2,
                "baz": ["John", "Mike", "Mary", "Dave Smith"]
            },
            "firstSub2": {
                "one": 14,
                "two": {
                    "name": "Joe",
                    "name2": "Dave Johnson"
                }
            }
        },
        "second": {
            "karen": 18,
            "secondSub": {
                "Dave": "Richardson"
            }
        },
        "outputs": {
            "daves": []
        }
    }"""
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    field_mapper = pipeline_builder.add_stage('Field Mapper', type='processor')
    # Match any field whose *name* contains 'dave' (case-insensitive), or any
    # STRING field whose *value* contains 'dave'; collect each match as a
    # {previousPath: value} entry appended to /outputs/daves.
    # NOTE(review): the string concatenation joins 'or' directly to '(' with no
    # space in between ("...or(f:type()..."); the EL parser appears to accept
    # this since '(' is a delimiter — confirm before reformatting.
    field_mapper.set_attributes(
        operate_on = 'FIELD_PATHS',
        conditional_expression = '${str:contains(str:toLower(f:name()), \'dave\') or' +
                                 '(f:type() == \'STRING\' and str:contains(str:toLower(f:value()), \'dave\'))}',
        mapping_expression = '/outputs/daves',
        aggregation_expression = '${map(fields, fieldByPreviousPath())}',
        maintain_original_paths = True
    )
    trash = pipeline_builder.add_stage('Trash')
    dev_raw_data_source >> field_mapper >> trash
    pipeline = pipeline_builder.build('Field mapper - The Daves I Know')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    sdc_executor.remove_pipeline(pipeline)
    field_mapper_output = snapshot[field_mapper.instance_name].output
    # Matches appear in document order, keyed by their original (previous) path.
    daves = field_mapper_output[0].get_field_data('/outputs/daves')
    assert len(daves) == 3
    assert daves[0]['/first/firstSub1/baz[3]'] == 'Dave Smith'
    assert daves[1]['/first/firstSub2/two/name2'] == 'Dave Johnson'
    assert daves[2]['/second/secondSub/Dave'] == 'Richardson'
@sdc_min_version('3.8.0')
def test_field_mapper_sanitize_names(sdc_builder, sdc_executor):
    """
    Test the Field Mapper processor, by name. For purposes of this test, sanitizing means replacing all "z"s in field
    names with "2".
    The pipeline that will be constructed is:

        dev_raw_data_source (JSON data) >> field_mapper (sanitize names) >> trash
    """
    raw_data = """{
        "first": {
            "firstSub1": {
                "zug": 1,
                "barz": 2,
                "baz": ["blah", "blech", "blargh"]
            },
            "firstSub2": {
                "one": 14,
                "twoz": {
                    "name": "Joe",
                    "name2": "Dave Johnson"
                }
            }
        },
        "second": {
            "karen": 18,
            "secondSubz": {
                "Dave": "Richardson"
            }
        }
    }"""
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
    field_mapper = pipeline_builder.add_stage('Field Mapper', type='processor')
    # FIELD_NAMES mode: the mapping expression rewrites each field's *name*.
    field_mapper.set_attributes(
        operate_on='FIELD_NAMES',
        mapping_expression='${str:replaceAll(f:name(), \'z\', \'2\')}',
        maintain_original_paths=False
    )
    trash = pipeline_builder.add_stage('Trash')
    dev_raw_data_source >> field_mapper >> trash
    # NOTE(review): the title is copy-pasted from the "find Daves" test; kept
    # as-is to avoid changing anything keyed on the pipeline name.
    pipeline = pipeline_builder.build('Field mapper - The Daves I Know')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    sdc_executor.remove_pipeline(pipeline)
    field_mapper_output = snapshot[field_mapper.instance_name].output
    output_record = field_mapper_output[0]
    # Renamed fields must exist with the old values; the old names must be gone.
    # PEP 8 fix: identity comparison (is None) instead of == None — .get() is
    # presumed to return Python None for a missing key; confirm against SDK docs.
    first_sub_1 = output_record.get_field_data('/first/firstSub1')
    assert first_sub_1.get('zug') is None
    assert first_sub_1.get('2ug') == 1
    assert first_sub_1.get('barz') is None
    assert first_sub_1.get('bar2') == 2
    assert first_sub_1.get('baz') is None
    assert first_sub_1.get('ba2')[0] == "blah"
    assert first_sub_1.get('ba2')[1] == "blech"
    assert first_sub_1.get('ba2')[2] == "blargh"
    first_sub_2 = output_record.get_field_data('/first/firstSub2')
    assert first_sub_2.get('twoz') is None
    assert first_sub_2.get('two2').get('name') == 'Joe'
    second = output_record.get_field_data('second')
    assert second.get('secondSubz') is None
    assert second.get('secondSub2').get('Dave') == 'Richardson'
@sdc_min_version('3.8.0')
def test_field_mapper_operate_on_values(sdc_builder, sdc_executor):
    """
    Test the Field Mapper processor, by value. Rounds double fields up to the nearest integer (ceiling).
    The pipeline that will be constructed is:

        dev_raw_data_source (JSON data) >> field_mapper (ceiling) >> trash
    """
    raw_data = """{
        "someData": {
            "value1": 19.2,
            "value2": -16.5,
            "value3": 1987.44,
            "subData": {
                "value4": 0.45
            }
        },
        "moreData": {
            "value5": 19884.5,
            "value6": -0.25
        }
    }"""
    builder = sdc_builder.get_pipeline_builder()
    source = builder.add_stage('Dev Raw Data Source')
    source.set_attributes(data_format='JSON', raw_data=raw_data)

    ceiling_mapper = builder.add_stage('Field Mapper', type='processor')
    # FIELD_VALUES mode: every DOUBLE field's value is replaced by its ceiling.
    ceiling_mapper.set_attributes(
        operate_on='FIELD_VALUES',
        conditional_expression="${f:type() == 'DOUBLE'}",
        mapping_expression='${math:ceil(f:value())}',
        maintain_original_paths=False,
    )
    trash = builder.add_stage('Trash')
    source >> ceiling_mapper >> trash
    pipeline = builder.build('Field mapper - The Daves I Know')
    sdc_executor.add_pipeline(pipeline)

    snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
    sdc_executor.stop_pipeline(pipeline)
    sdc_executor.remove_pipeline(pipeline)

    record = snapshot[ceiling_mapper.instance_name].output[0]
    expected_ceilings = {
        '/someData/value1': 20,
        '/someData/value2': -16,
        '/someData/value3': 1988,
        '/someData/subData/value4': 1,
        '/moreData/value5': 19885,
        '/moreData/value6': 0,
    }
    for path, expected in expected_ceilings.items():
        assert record.get_field_data(path) == expected
|
import unittest
import pytest
import numpy as np
from yaonet.tensor import Tensor
from yaonet.functional import *
class TestTensorFunstion(unittest.TestCase):
    """Forward and backward checks for the functional ops (sigmoid/tanh/relu)."""

    def test_sigmoid(self):
        t1 = Tensor([1, 2, 6], requires_grad=True)
        out = sigmoid(t1)
        # forward: sigmoid(x) = 1 / (1 + e^-x)
        np.testing.assert_array_almost_equal(out.data, 1 / (1 + np.exp([-1, -2, -6])))
        # backward with upstream gradient [-1, -2, -3]:
        # d sigmoid/dx = s * (1 - s)
        out.backward(Tensor([-1., -2., -3.]))
        s = 1 / (1 + np.exp(-t1.data))
        np.testing.assert_array_almost_equal(t1.grad, np.array([-1, -2, -3]) * s * (1 - s))

    def test_tanh(self):
        t1 = Tensor([1, 2, 6], requires_grad=True)
        out = tanh(t1)
        np.testing.assert_array_almost_equal(out.data, np.tanh(t1.data))
        out.backward(Tensor([-1., -2., -3.]))
        # d tanh/dx = 1 - tanh(x)^2
        np.testing.assert_array_almost_equal(t1.grad, np.array([-1, -2, -3]) * (1 - out.data ** 2))

    def test_relu(self):
        t1 = Tensor([-1, 0, 2], requires_grad=True)
        out = relu(t1)
        assert out.data.tolist() == [0, 0, 2]
        out.backward(Tensor([-1., -2., -3.]))
        # gradient only flows where the input was positive
        assert t1.grad.tolist() == [0, 0, -6]
import pytest
from flask_restful import Resource
from flask_restful_swagger.swagger import SwaggerEndpoint, operation
from .lib.helpers import TestCaseSupport
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class MockDataType(object):
    """Stand-in for a user-defined dataType; the parametrized cases below expect
    non-primitive types to be rendered by their class name ('MockDataType')."""
    pass


# Reuse unittest-style rich assertions (assertDictEqual) from inside pytest tests.
tc = TestCaseSupport()
tc.maxDiff = None  # never truncate dict diffs on failure
@patch("flask_restful_swagger.swagger.extract_swagger_path")
@patch("flask_restful_swagger.swagger.extract_path_arguments")
@patch("flask_restful_swagger.swagger._parse_doc")
@patch("flask_restful_swagger.swagger.SwaggerEndpoint.extract_operations")
def test_swagger_endpoint(operations, docs, args, path):
    """SwaggerEndpoint's constructor wires its attributes from the four helpers."""
    # Mocks are injected bottom-up: the last @patch becomes the first parameter.
    path.return_value = "Sometime Soon"
    args.return_value = "I Will Return"
    docs.return_value = ("A Description Will Follow", "As to Where to Meet")
    operations.return_value = ["knee surgery", "back surgery"]

    endpoint = SwaggerEndpoint("Fake Resource", "/some/path")

    assert path.called
    assert args.called
    assert docs.called
    assert operations.called
    # Each attribute comes straight from the corresponding helper's return value.
    assert endpoint.path == "Sometime Soon"
    assert endpoint.description == "A Description Will Follow"
    assert endpoint.notes == "As to Where to Meet"
    assert endpoint.operations == ["knee surgery", "back surgery"]
    # extract_operations receives the resource and the extracted path arguments.
    operations.assert_called_once_with("Fake Resource", "I Will Return")
def test_swagger_endpoint_extract_operations_empty():
    """extract_operations yields an empty list when no method is @operation-decorated."""

    class PlainResource(Resource):
        def get(self):
            return "OK", 200, {"Access-Control-Allow-Origin": "*"}

    assert SwaggerEndpoint.extract_operations(PlainResource, []) == []
@pytest.mark.parametrize(
    "mock_properties, update_with",
    [
        (
            # kwargs handed to @operation on the resource's get() method
            {
                "name": "one",
                "method": "get",
                "other": MockDataType,
                "parameters": [
                    {
                        "name": "identifier",
                        "description": "identifier",
                        "required": True,
                        "allowMultiple": False,
                        "dataType": "string",
                        "paramType": "path",
                    },
                    {
                        "name": "identifier2",
                        "description": "identifier2",
                        "required": True,
                        "allowMultiple": False,
                        "dataType": "float",
                        "paramType": "path",
                    },
                ],
            },
            # keys extract_operations is expected to add or overwrite in the result
            {
                "method": "get<br/>get",
                "nickname": "nickname",
                "summary": None,
                "notes": None,
                "other": "MockDataType",  # non-primitive dataType rendered as its class name
            },
        ),
    ],
)
@patch("flask_restful_swagger.swagger._get_current_registry")
def test_get_swagger_endpoint_not_subclassed_basic_example(
    registry, mock_properties, update_with
):
    """extract_operations on a Resource with one decorated GET returns the
    @operation properties merged with the derived method/nickname/type keys."""
    registry.return_value = {
        "apiVersion": "mock_version",
        "swaggerVersion": "mock_swagger_version",
        "basePath": "mock_path",
        "spec_endpoint_path": "mock_spec_endpoint_path",
        "description": "mock_description",
    }

    class MockResource(Resource):
        @operation(**mock_properties)
        def get(self):
            return "OK", 200, {"Access-Control-Allow-Origin": "*"}

    return_value = SwaggerEndpoint.extract_operations(
        MockResource,
        [
            {"name": "identifier", "dataType": "string", "paramType": "path"},
            {"name": "identifier2", "dataType": "float", "paramType": "path"},
        ],
    )
    # Build the expected dict in place, then deep-compare the single operation.
    mock_properties.update(update_with)
    tc.assertDictEqual(return_value[0], mock_properties)
@pytest.mark.parametrize(
    "mock_properties, update_with",
    [
        (
            {
                "name": "one",
                "method": "get",
                "other": MockDataType,
                "parameters": [
                    {
                        "name": "identifier",
                        "description": "identifier",
                        "required": True,
                        "allowMultiple": False,
                        "dataType": "string",
                        "paramType": "path",
                    },
                    {
                        "name": "identifier2",
                        "description": "identifier2",
                        "required": True,
                        "allowMultiple": False,
                        "dataType": "float",
                        "paramType": "path",
                    },
                ],
            },
            {
                "method": "get<br/>get",
                "nickname": "nickname",
                "summary": None,
                "notes": None,
                "other": "MockDataType",
            },
        ),
    ],
)
@patch("flask_restful_swagger.swagger._get_current_registry")
def test_get_swagger_endpoint_subclassed_basic_example(
    registry, mock_properties, update_with
):
    """Operations decorated on a parent class are still extracted when
    extract_operations is given a subclass of the decorated resource."""
    registry.return_value = {
        "apiVersion": "mock_version",
        "swaggerVersion": "mock_swagger_version",
        "basePath": "mock_path",
        "spec_endpoint_path": "mock_spec_endpoint_path",
        "description": "mock_description",
    }

    class MockResource(Resource):
        @operation(**mock_properties)
        def get(self):
            return "OK", 200, {"Access-Control-Allow-Origin": "*"}

    class MockSubClass(MockResource):
        pass

    path_params = [
        {"name": "identifier", "dataType": "string", "paramType": "path"},
        {"name": "identifier2", "dataType": "float", "paramType": "path"},
    ]
    extracted = SwaggerEndpoint.extract_operations(MockSubClass, path_params)

    # Expected spec is the decorated properties overlaid with the
    # generated/normalized fields from the parametrize case.
    expected = dict(mock_properties)
    expected.update(update_with)
    tc.assertDictEqual(extracted[0], expected)
|
import pygame
from src.constant import WIDTH, HEIGTH
class Player:
    """Right-hand paddle controlled with the up/down arrow keys.

    The paddle is a ``pygame.Rect`` anchored near the right edge of the
    screen and clamped vertically to the window bounds each frame.
    """

    def __init__(self, speed_step=7):
        """Create the paddle.

        Args:
            speed_step: Pixels of vertical speed added/removed per arrow-key
                press/release. Defaults to 7 (the original hard-coded value),
                so existing callers are unaffected; exposing it lets the game
                difficulty be tuned without editing this class.
        """
        # 20x140 paddle, 90 px in from the right edge, vertically centered.
        self.obj = pygame.Rect((WIDTH - 90), (HEIGTH / 2 - 70), 20, 140)
        self.speed = 0
        self.speed_step = speed_step

    def control_speed(self, event):
        """Adjust vertical speed from a pygame KEYDOWN/KEYUP event.

        KEYDOWN adds +/- speed_step; the matching KEYUP undoes it, so speed
        returns to 0 once all keys are released.
        """
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_DOWN:
                self.speed += self.speed_step
            if event.key == pygame.K_UP:
                self.speed -= self.speed_step
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_DOWN:
                self.speed -= self.speed_step
            if event.key == pygame.K_UP:
                self.speed += self.speed_step

    def movement(self):
        """Apply the current speed and clamp the paddle inside the screen."""
        self.obj.y += self.speed
        if self.obj.top <= 0:
            self.obj.top = 0
        if self.obj.bottom >= HEIGTH:
            self.obj.bottom = HEIGTH
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['FeatureGroupArgs', 'FeatureGroup']
# NOTE: tfgen-generated input type — structure must not be hand-modified.
@pulumi.input_type
class FeatureGroupArgs:
    def __init__(__self__, *,
                 event_time_feature_name: pulumi.Input[str],
                 feature_definitions: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]],
                 feature_group_name: pulumi.Input[str],
                 record_identifier_feature_name: pulumi.Input[str],
                 role_arn: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 offline_store_config: Optional[pulumi.Input['FeatureGroupOfflineStoreConfigArgs']] = None,
                 online_store_config: Optional[pulumi.Input['FeatureGroupOnlineStoreConfigArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a FeatureGroup resource.

        :param pulumi.Input[str] event_time_feature_name: The name of the feature that stores the EventTime of a Record in a Feature Group.
        :param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: A list of Feature names and types. See Feature Definition Below.
        :param pulumi.Input[str] feature_group_name: The name of the Feature Group. The name must be unique within an AWS Region in an AWS account.
        :param pulumi.Input[str] record_identifier_feature_name: The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store.
        :param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an `offline_store_config` is provided.
        :param pulumi.Input[str] description: A free-form description of a Feature Group.
        :param pulumi.Input['FeatureGroupOfflineStoreConfigArgs'] offline_store_config: The Offline Feature Store Configuration. See Offline Store Config Below.
        :param pulumi.Input['FeatureGroupOnlineStoreConfigArgs'] online_store_config: The Online Feature Store Configuration. See Online Store Config Below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of resource tags for the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        pulumi.set(__self__, "event_time_feature_name", event_time_feature_name)
        pulumi.set(__self__, "feature_definitions", feature_definitions)
        pulumi.set(__self__, "feature_group_name", feature_group_name)
        pulumi.set(__self__, "record_identifier_feature_name", record_identifier_feature_name)
        pulumi.set(__self__, "role_arn", role_arn)
        # Optional arguments are only stored when provided so that unset
        # values stay absent from the resource inputs.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if offline_store_config is not None:
            pulumi.set(__self__, "offline_store_config", offline_store_config)
        if online_store_config is not None:
            pulumi.set(__self__, "online_store_config", online_store_config)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="eventTimeFeatureName")
    def event_time_feature_name(self) -> pulumi.Input[str]:
        """
        The name of the feature that stores the EventTime of a Record in a Feature Group.
        """
        return pulumi.get(self, "event_time_feature_name")

    @event_time_feature_name.setter
    def event_time_feature_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "event_time_feature_name", value)

    @property
    @pulumi.getter(name="featureDefinitions")
    def feature_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]:
        """
        A list of Feature names and types. See Feature Definition Below.
        """
        return pulumi.get(self, "feature_definitions")

    @feature_definitions.setter
    def feature_definitions(self, value: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]):
        pulumi.set(self, "feature_definitions", value)

    @property
    @pulumi.getter(name="featureGroupName")
    def feature_group_name(self) -> pulumi.Input[str]:
        """
        The name of the Feature Group. The name must be unique within an AWS Region in an AWS account.
        """
        return pulumi.get(self, "feature_group_name")

    @feature_group_name.setter
    def feature_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "feature_group_name", value)

    @property
    @pulumi.getter(name="recordIdentifierFeatureName")
    def record_identifier_feature_name(self) -> pulumi.Input[str]:
        """
        The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store.
        """
        return pulumi.get(self, "record_identifier_feature_name")

    @record_identifier_feature_name.setter
    def record_identifier_feature_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "record_identifier_feature_name", value)

    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> pulumi.Input[str]:
        """
        The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an `offline_store_config` is provided.
        """
        return pulumi.get(self, "role_arn")

    @role_arn.setter
    def role_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "role_arn", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A free-form description of a Feature Group.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="offlineStoreConfig")
    def offline_store_config(self) -> Optional[pulumi.Input['FeatureGroupOfflineStoreConfigArgs']]:
        """
        The Offline Feature Store Configuration. See Offline Store Config Below.
        """
        return pulumi.get(self, "offline_store_config")

    @offline_store_config.setter
    def offline_store_config(self, value: Optional[pulumi.Input['FeatureGroupOfflineStoreConfigArgs']]):
        pulumi.set(self, "offline_store_config", value)

    @property
    @pulumi.getter(name="onlineStoreConfig")
    def online_store_config(self) -> Optional[pulumi.Input['FeatureGroupOnlineStoreConfigArgs']]:
        """
        The Online Feature Store Configuration. See Online Store Config Below.
        """
        return pulumi.get(self, "online_store_config")

    @online_store_config.setter
    def online_store_config(self, value: Optional[pulumi.Input['FeatureGroupOnlineStoreConfigArgs']]):
        pulumi.set(self, "online_store_config", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Map of resource tags for the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
# NOTE: tfgen-generated state type — structure must not be hand-modified.
@pulumi.input_type
class _FeatureGroupState:
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 event_time_feature_name: Optional[pulumi.Input[str]] = None,
                 feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]] = None,
                 feature_group_name: Optional[pulumi.Input[str]] = None,
                 offline_store_config: Optional[pulumi.Input['FeatureGroupOfflineStoreConfigArgs']] = None,
                 online_store_config: Optional[pulumi.Input['FeatureGroupOnlineStoreConfigArgs']] = None,
                 record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
                 role_arn: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering FeatureGroup resources.

        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this feature_group.
        :param pulumi.Input[str] description: A free-form description of a Feature Group.
        :param pulumi.Input[str] event_time_feature_name: The name of the feature that stores the EventTime of a Record in a Feature Group.
        :param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: A list of Feature names and types. See Feature Definition Below.
        :param pulumi.Input[str] feature_group_name: The name of the Feature Group. The name must be unique within an AWS Region in an AWS account.
        :param pulumi.Input['FeatureGroupOfflineStoreConfigArgs'] offline_store_config: The Offline Feature Store Configuration. See Offline Store Config Below.
        :param pulumi.Input['FeatureGroupOnlineStoreConfigArgs'] online_store_config: The Online Feature Store Configuration. See Online Store Config Below.
        :param pulumi.Input[str] record_identifier_feature_name: The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store.
        :param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an `offline_store_config` is provided.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of resource tags for the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        """
        # All state fields are optional; only provided values are recorded.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if event_time_feature_name is not None:
            pulumi.set(__self__, "event_time_feature_name", event_time_feature_name)
        if feature_definitions is not None:
            pulumi.set(__self__, "feature_definitions", feature_definitions)
        if feature_group_name is not None:
            pulumi.set(__self__, "feature_group_name", feature_group_name)
        if offline_store_config is not None:
            pulumi.set(__self__, "offline_store_config", offline_store_config)
        if online_store_config is not None:
            pulumi.set(__self__, "online_store_config", online_store_config)
        if record_identifier_feature_name is not None:
            pulumi.set(__self__, "record_identifier_feature_name", record_identifier_feature_name)
        if role_arn is not None:
            pulumi.set(__self__, "role_arn", role_arn)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)

    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) assigned by AWS to this feature_group.
        """
        return pulumi.get(self, "arn")

    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A free-form description of a Feature Group.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="eventTimeFeatureName")
    def event_time_feature_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the feature that stores the EventTime of a Record in a Feature Group.
        """
        return pulumi.get(self, "event_time_feature_name")

    @event_time_feature_name.setter
    def event_time_feature_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "event_time_feature_name", value)

    @property
    @pulumi.getter(name="featureDefinitions")
    def feature_definitions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]]:
        """
        A list of Feature names and types. See Feature Definition Below.
        """
        return pulumi.get(self, "feature_definitions")

    @feature_definitions.setter
    def feature_definitions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]]):
        pulumi.set(self, "feature_definitions", value)

    @property
    @pulumi.getter(name="featureGroupName")
    def feature_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Feature Group. The name must be unique within an AWS Region in an AWS account.
        """
        return pulumi.get(self, "feature_group_name")

    @feature_group_name.setter
    def feature_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "feature_group_name", value)

    @property
    @pulumi.getter(name="offlineStoreConfig")
    def offline_store_config(self) -> Optional[pulumi.Input['FeatureGroupOfflineStoreConfigArgs']]:
        """
        The Offline Feature Store Configuration. See Offline Store Config Below.
        """
        return pulumi.get(self, "offline_store_config")

    @offline_store_config.setter
    def offline_store_config(self, value: Optional[pulumi.Input['FeatureGroupOfflineStoreConfigArgs']]):
        pulumi.set(self, "offline_store_config", value)

    @property
    @pulumi.getter(name="onlineStoreConfig")
    def online_store_config(self) -> Optional[pulumi.Input['FeatureGroupOnlineStoreConfigArgs']]:
        """
        The Online Feature Store Configuration. See Online Store Config Below.
        """
        return pulumi.get(self, "online_store_config")

    @online_store_config.setter
    def online_store_config(self, value: Optional[pulumi.Input['FeatureGroupOnlineStoreConfigArgs']]):
        pulumi.set(self, "online_store_config", value)

    @property
    @pulumi.getter(name="recordIdentifierFeatureName")
    def record_identifier_feature_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store.
        """
        return pulumi.get(self, "record_identifier_feature_name")

    @record_identifier_feature_name.setter
    def record_identifier_feature_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "record_identifier_feature_name", value)

    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an `offline_store_config` is provided.
        """
        return pulumi.get(self, "role_arn")

    @role_arn.setter
    def role_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "role_arn", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Map of resource tags for the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")

    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
# NOTE: tfgen-generated resource class — structure must not be hand-modified.
class FeatureGroup(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 event_time_feature_name: Optional[pulumi.Input[str]] = None,
                 feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
                 feature_group_name: Optional[pulumi.Input[str]] = None,
                 offline_store_config: Optional[pulumi.Input[pulumi.InputType['FeatureGroupOfflineStoreConfigArgs']]] = None,
                 online_store_config: Optional[pulumi.Input[pulumi.InputType['FeatureGroupOnlineStoreConfigArgs']]] = None,
                 record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
                 role_arn: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Provides a SageMaker Feature Group resource.

        ## Example Usage

        Basic usage:

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.sagemaker.FeatureGroup("example",
            feature_group_name="example",
            record_identifier_feature_name="example",
            event_time_feature_name="example",
            role_arn=aws_iam_role["test"]["arn"],
            feature_definitions=[aws.sagemaker.FeatureGroupFeatureDefinitionArgs(
                feature_name="example",
                feature_type="String",
            )],
            online_store_config=aws.sagemaker.FeatureGroupOnlineStoreConfigArgs(
                enable_online_store=True,
            ))
        ```

        ## Import

        Feature Groups can be imported using the `name`, e.g.,

        ```sh
         $ pulumi import aws:sagemaker/featureGroup:FeatureGroup test_feature_group feature_group-foo
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: A free-form description of a Feature Group.
        :param pulumi.Input[str] event_time_feature_name: The name of the feature that stores the EventTime of a Record in a Feature Group.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]] feature_definitions: A list of Feature names and types. See Feature Definition Below.
        :param pulumi.Input[str] feature_group_name: The name of the Feature Group. The name must be unique within an AWS Region in an AWS account.
        :param pulumi.Input[pulumi.InputType['FeatureGroupOfflineStoreConfigArgs']] offline_store_config: The Offline Feature Store Configuration. See Offline Store Config Below.
        :param pulumi.Input[pulumi.InputType['FeatureGroupOnlineStoreConfigArgs']] online_store_config: The Online Feature Store Configuration. See Online Store Config Below.
        :param pulumi.Input[str] record_identifier_feature_name: The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store.
        :param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an `offline_store_config` is provided.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of resource tags for the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: FeatureGroupArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a SageMaker Feature Group resource.

        ## Example Usage

        Basic usage:

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.sagemaker.FeatureGroup("example",
            feature_group_name="example",
            record_identifier_feature_name="example",
            event_time_feature_name="example",
            role_arn=aws_iam_role["test"]["arn"],
            feature_definitions=[aws.sagemaker.FeatureGroupFeatureDefinitionArgs(
                feature_name="example",
                feature_type="String",
            )],
            online_store_config=aws.sagemaker.FeatureGroupOnlineStoreConfigArgs(
                enable_online_store=True,
            ))
        ```

        ## Import

        Feature Groups can be imported using the `name`, e.g.,

        ```sh
         $ pulumi import aws:sagemaker/featureGroup:FeatureGroup test_feature_group feature_group-foo
        ```

        :param str resource_name: The name of the resource.
        :param FeatureGroupArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword-arguments) overloads.
        resource_args, opts = _utilities.get_resource_args_opts(FeatureGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       event_time_feature_name: Optional[pulumi.Input[str]] = None,
                       feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
                       feature_group_name: Optional[pulumi.Input[str]] = None,
                       offline_store_config: Optional[pulumi.Input[pulumi.InputType['FeatureGroupOfflineStoreConfigArgs']]] = None,
                       online_store_config: Optional[pulumi.Input[pulumi.InputType['FeatureGroupOnlineStoreConfigArgs']]] = None,
                       record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
                       role_arn: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)

            __props__.__dict__["description"] = description
            # Required properties may only be omitted when looking up by URN.
            if event_time_feature_name is None and not opts.urn:
                raise TypeError("Missing required property 'event_time_feature_name'")
            __props__.__dict__["event_time_feature_name"] = event_time_feature_name
            if feature_definitions is None and not opts.urn:
                raise TypeError("Missing required property 'feature_definitions'")
            __props__.__dict__["feature_definitions"] = feature_definitions
            if feature_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'feature_group_name'")
            __props__.__dict__["feature_group_name"] = feature_group_name
            __props__.__dict__["offline_store_config"] = offline_store_config
            __props__.__dict__["online_store_config"] = online_store_config
            if record_identifier_feature_name is None and not opts.urn:
                raise TypeError("Missing required property 'record_identifier_feature_name'")
            __props__.__dict__["record_identifier_feature_name"] = record_identifier_feature_name
            if role_arn is None and not opts.urn:
                raise TypeError("Missing required property 'role_arn'")
            __props__.__dict__["role_arn"] = role_arn
            __props__.__dict__["tags"] = tags
            # arn and tags_all are provider-computed outputs, never inputs.
            __props__.__dict__["arn"] = None
            __props__.__dict__["tags_all"] = None
        super(FeatureGroup, __self__).__init__(
            'aws:sagemaker/featureGroup:FeatureGroup',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            event_time_feature_name: Optional[pulumi.Input[str]] = None,
            feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
            feature_group_name: Optional[pulumi.Input[str]] = None,
            offline_store_config: Optional[pulumi.Input[pulumi.InputType['FeatureGroupOfflineStoreConfigArgs']]] = None,
            online_store_config: Optional[pulumi.Input[pulumi.InputType['FeatureGroupOnlineStoreConfigArgs']]] = None,
            record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
            role_arn: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'FeatureGroup':
        """
        Get an existing FeatureGroup resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) assigned by AWS to this feature_group.
        :param pulumi.Input[str] description: A free-form description of a Feature Group.
        :param pulumi.Input[str] event_time_feature_name: The name of the feature that stores the EventTime of a Record in a Feature Group.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]] feature_definitions: A list of Feature names and types. See Feature Definition Below.
        :param pulumi.Input[str] feature_group_name: The name of the Feature Group. The name must be unique within an AWS Region in an AWS account.
        :param pulumi.Input[pulumi.InputType['FeatureGroupOfflineStoreConfigArgs']] offline_store_config: The Offline Feature Store Configuration. See Offline Store Config Below.
        :param pulumi.Input[pulumi.InputType['FeatureGroupOnlineStoreConfigArgs']] online_store_config: The Online Feature Store Configuration. See Online Store Config Below.
        :param pulumi.Input[str] record_identifier_feature_name: The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store.
        :param pulumi.Input[str] role_arn: The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an `offline_store_config` is provided.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of resource tags for the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _FeatureGroupState.__new__(_FeatureGroupState)

        __props__.__dict__["arn"] = arn
        __props__.__dict__["description"] = description
        __props__.__dict__["event_time_feature_name"] = event_time_feature_name
        __props__.__dict__["feature_definitions"] = feature_definitions
        __props__.__dict__["feature_group_name"] = feature_group_name
        __props__.__dict__["offline_store_config"] = offline_store_config
        __props__.__dict__["online_store_config"] = online_store_config
        __props__.__dict__["record_identifier_feature_name"] = record_identifier_feature_name
        __props__.__dict__["role_arn"] = role_arn
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        return FeatureGroup(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) assigned by AWS to this feature_group.
        """
        return pulumi.get(self, "arn")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        A free-form description of a Feature Group.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="eventTimeFeatureName")
    def event_time_feature_name(self) -> pulumi.Output[str]:
        """
        The name of the feature that stores the EventTime of a Record in a Feature Group.
        """
        return pulumi.get(self, "event_time_feature_name")

    @property
    @pulumi.getter(name="featureDefinitions")
    def feature_definitions(self) -> pulumi.Output[Sequence['outputs.FeatureGroupFeatureDefinition']]:
        """
        A list of Feature names and types. See Feature Definition Below.
        """
        return pulumi.get(self, "feature_definitions")

    @property
    @pulumi.getter(name="featureGroupName")
    def feature_group_name(self) -> pulumi.Output[str]:
        """
        The name of the Feature Group. The name must be unique within an AWS Region in an AWS account.
        """
        return pulumi.get(self, "feature_group_name")

    @property
    @pulumi.getter(name="offlineStoreConfig")
    def offline_store_config(self) -> pulumi.Output[Optional['outputs.FeatureGroupOfflineStoreConfig']]:
        """
        The Offline Feature Store Configuration. See Offline Store Config Below.
        """
        return pulumi.get(self, "offline_store_config")

    @property
    @pulumi.getter(name="onlineStoreConfig")
    def online_store_config(self) -> pulumi.Output[Optional['outputs.FeatureGroupOnlineStoreConfig']]:
        """
        The Online Feature Store Configuration. See Online Store Config Below.
        """
        return pulumi.get(self, "online_store_config")

    @property
    @pulumi.getter(name="recordIdentifierFeatureName")
    def record_identifier_feature_name(self) -> pulumi.Output[str]:
        """
        The name of the Feature whose value uniquely identifies a Record defined in the Feature Store. Only the latest record per identifier value will be stored in the Online Store.
        """
        return pulumi.get(self, "record_identifier_feature_name")

    @property
    @pulumi.getter(name="roleArn")
    def role_arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the Offline Store if an `offline_store_config` is provided.
        """
        return pulumi.get(self, "role_arn")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Map of resource tags for the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")
|
from .regening import Regening
from .swelling import Swelling |
import hashlib
import hmac
import os
import time
from typing import Tuple
from urllib.parse import urlencode

import numpy as np
import pandas as pd
import requests
from dotenv import load_dotenv
from supportLibEasyT.log_manager import LogManager
class CredentialsNotFound(Exception):
    """Raised when the Binance API key or secret cannot be found.

    It does not protect against a key or secret that is present but wrong —
    only against one that is missing, empty, or left at the placeholder value.

    Note: this now derives from ``Exception`` rather than ``BaseException``.
    Subclassing ``BaseException`` is reserved for interpreter-exit signals
    (KeyboardInterrupt, SystemExit); an ordinary error class must inherit
    from ``Exception`` so that generic ``except Exception`` boundaries can
    catch it. Any caller catching ``CredentialsNotFound`` directly is
    unaffected.
    """
def setup_environment(log) -> Tuple[str, str, str]:
    """
    Check that the Binance credentials are available in the environment.

    Loads a ``.env`` file (if present) and reads the base URL, API key and
    secret from the environment; used to fail fast before any request is made.

    Args:
        log:
            Receives the log handler to handle this support function.

    Raises:
        CredentialsNotFound:
            When the key or secret is missing, empty, or still set to the
            ``<insert your credential here>`` placeholder.

    Returns:
        A ``(url_base, key, secret)`` tuple read from the environment
        variables ``BINANCE_BASE_URL``, ``BINANCE_API_KEY`` and
        ``BINANCE_SECRET_KEY``. Note ``url_base`` may be ``None`` if
        ``BINANCE_BASE_URL`` is unset — only key/secret are validated here.
    """
    # Fix: the original annotation was `-> (str, str, str)`, which evaluates
    # to a tuple of classes and is not a valid type annotation.
    log.info('Setting up the environment.')
    load_dotenv()

    log.info('Retrieving the base URL')
    url_base = os.environ.get('BINANCE_BASE_URL')
    log.info(f'URL retrieved: {url_base}')

    key = os.environ.get('BINANCE_API_KEY')
    secret = os.environ.get('BINANCE_SECRET_KEY')
    if key is None or secret is None:
        log.error('Your Binance Key or Secret are empty. You must have these information.')
        raise CredentialsNotFound
    elif key == '<insert your credential here>' or secret == '<insert your credential here>':
        log.error('Your Binance Key or Secret was not provided. You must have these information.')
        raise CredentialsNotFound

    return url_base, key, secret
def get_price_last(url_base: str, symbol: str) -> str:
    """Fetch the most recent traded price for *symbol* from the Binance ticker.

    Args:
        url_base:
            Base URL of the Binance REST API (test or production host).
        symbol:
            Trading pair whose latest price is requested.

    Returns:
        The latest price as a string, exactly as returned by the API.

    Raises:
        requests.HTTPError: If the endpoint answers with an error status.
    """
    endpoint = "/api/v3/ticker/price"
    response = requests.get(url_base + endpoint, params={"symbol": symbol})
    response.raise_for_status()
    return response.json()['price']
def get_account(log: LogManager,
                url_base: str,
                key: str,
                secret: str) -> dict:
    """Return the user's Binance Spot account information.

    Args:
        log:
            Log handler used to report progress.
        url_base:
            Base URL of the Binance REST API (test or production host).
        key:
            API key used to authenticate the request.
        secret:
            API secret used to sign the request.

    Returns:
        The account information as a dict decoded from the JSON response.

    Raises:
        requests.HTTPError: If the endpoint answers with an error status.
    """
    log.info('Get account information from Binance Spot')
    timestamp_ms = int(time.time() * 1000)
    # Binance signed endpoints require an HMAC-SHA256 signature of the query string.
    query_string = urlencode({"timestamp": timestamp_ms})
    digest = hmac.new(secret.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256)
    response = requests.get(
        url_base + "/api/v3/account",
        params={"timestamp": timestamp_ms, "signature": digest.hexdigest()},
        headers={"X-MBX-APIKEY": key},
    )
    response.raise_for_status()
    return response.json()
def get_symbol_asset_balance(log: LogManager,
                             url_base: str,
                             key: str,
                             secret: str,
                             symbol: str) -> float:
    """Return the free balance of the base asset of *symbol*.

    Args:
        log:
            Log handler used to report progress.
        url_base:
            Base URL of the Binance REST API (test or production host).
        key:
            API key used to authenticate the request.
        secret:
            API secret used to sign the request.
        symbol:
            Trading pair; its first three characters are taken as the asset
            code. NOTE(review): assumes a 3-letter base asset — verify for
            pairs with 4-letter assets before relying on this.

    Returns:
        The "free" amount of that asset as a float.
    """
    log.info(f'Get the asset balance for {symbol} Binance Spot')
    snapshot = get_account(log, url_base, key, secret)
    frame = pd.DataFrame(snapshot['balances'])
    asset_code = symbol[:3]
    selected = frame[frame['asset'].values == asset_code]
    return selected['free'].astype(np.float64).item()
|
# coding:utf-8
"""
Module for Pixar USD.
"""
__version__ = '0.0.0'
|
# WeChat API endpoints (formatted with app id / secret / access token).
ACCESS_TOKEN_URL = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s"
SERVER_IP_URL = "https://api.weixin.qq.com/cgi-bin/getcallbackip?access_token=%s"
# NOTE(review): real-looking credentials are committed to source control —
# move APP_ID/SECRET_ID into environment variables and rotate this secret.
APP_ID = "wx3eea165d584f5864"
SECRET_ID = "d3c237a1bf28009bb5b7e82291f662f8"
# Redis connection settings.
REDIS_URL = "127.0.0.1"
REDIS_PORT = 6379
REDIS_DB = 0
|
from itertools import cycle
def reversi_row(moves):
    """Play the given column moves on a single Reversi row of 8 cells.

    Players '*' and 'O' alternate, '*' moving first. After each placement,
    any run of opposing discs enclosed between the new disc and the nearest
    same-coloured disc — on either side — is flipped.

    Returns:
        The final row as an 8-character string of '*', 'O' and '.'.
    """
    cells = ['.'] * 8
    turn = cycle('*O')
    for pos in moves:
        disc = next(turn)
        # A flip run must contain neither empties nor our own colour.
        blockers = {'.', disc}
        cells[pos] = disc
        snapshot = ''.join(cells)
        # Flip to the left: closest same-coloured disc before pos.
        anchor = snapshot.rfind(disc, 0, pos)
        if anchor != -1:
            between = snapshot[anchor + 1:pos]
            if not blockers & set(between):
                cells[anchor + 1:pos] = [disc] * len(between)
        # Flip to the right: closest same-coloured disc after pos.
        anchor = snapshot.find(disc, pos + 1)
        if anchor != -1:
            between = snapshot[pos + 1:anchor]
            if not blockers & set(between):
                cells[pos + 1:anchor] = [disc] * len(between)
    return ''.join(cells)
|
"""
This examples show how to train a Cross-Encoder for the MS Marco dataset (https://github.com/microsoft/MSMARCO-Passage-Ranking).
The query and the passage are passed simoultanously to a Transformer network. The network then returns
a score between 0 and 1 how relevant the passage is for a given query.
The resulting Cross-Encoder can then be used for passage re-ranking: You retrieve for example 100 passages
for a given query, for example with ElasticSearch, and pass the query+retrieved_passage to the CrossEncoder
for scoring. You sort the results then according to the output of the CrossEncoder.
This gives a significant boost compared to out-of-the-box ElasticSearch / BM25 ranking.
Running this script:
python train_cross-encoder.py
"""
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CERerankingEvaluator
from sentence_transformers import InputExample
import logging
from datetime import datetime
import gzip
import os
import tarfile
import tqdm
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
#First, we define the transformer model we want to fine-tune
model_name = 'distilroberta-base'
train_batch_size = 32
num_epochs = 1
model_save_path = 'output/training_ms-marco_cross-encoder-'+model_name.replace("/", "-")+'-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We train the network with as a binary label task
# Given [query, passage] is the label 0 = irrelevant or 1 = relevant?
# We use a positive-to-negative ratio: For 1 positive sample (label 1) we include 4 negative samples (label 0)
# in our training setup. For the negative samples, we use the triplets provided by MS Marco that
# specify (query, positive sample, negative sample).
pos_neg_ration = 4
# Maximal number of training samples we want to use
max_train_samples = 2e7
#We set num_labels=1, which predicts a continous score between 0 and 1
model = CrossEncoder(model_name, num_labels=1, max_length=512)
### Now we read the MS Marco dataset
data_folder = 'msmarco-data'
os.makedirs(data_folder, exist_ok=True)
#### Read the corpus files, that contain all the passages. Store them in the corpus dict
corpus = {}
collection_filepath = os.path.join(data_folder, 'collection.tsv')
if not os.path.exists(collection_filepath):
tar_filepath = os.path.join(data_folder, 'collection.tar.gz')
if not os.path.exists(tar_filepath):
logging.info("Download collection.tar.gz")
util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz', tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
with open(collection_filepath, 'r', encoding='utf8') as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
corpus[pid] = passage
### Read the train queries, store in queries dict
queries = {}
queries_filepath = os.path.join(data_folder, 'queries.train.tsv')
if not os.path.exists(queries_filepath):
tar_filepath = os.path.join(data_folder, 'queries.tar.gz')
if not os.path.exists(tar_filepath):
logging.info("Download queries.tar.gz")
util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/queries.tar.gz', tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
with open(queries_filepath, 'r', encoding='utf8') as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
queries[qid] = query
### Now we create our training & dev data
train_samples = []
dev_samples = {}
# We use 200 random queries from the train set for evaluation during training
# Each query has at least one relevant and up to 200 irrelevant (negative) passages
num_dev_queries = 200
num_max_dev_negatives = 200
# msmarco-qidpidtriples.rnd-shuf.train-eval.tsv.gz and msmarco-qidpidtriples.rnd-shuf.train.tsv.gz is a randomly
# shuffled version of qidpidtriples.train.full.2.tsv.gz from the MS Marco website
# We extracted in the train-eval split 500 random queries that can be used for evaluation during training
train_eval_filepath = os.path.join(data_folder, 'msmarco-qidpidtriples.rnd-shuf.train-eval.tsv.gz')
if not os.path.exists(train_eval_filepath):
logging.info("Download "+os.path.basename(train_eval_filepath))
util.http_get('https://sbert.net/datasets/msmarco-qidpidtriples.rnd-shuf.train-eval.tsv.gz', train_eval_filepath)
with gzip.open(train_eval_filepath, 'rt') as fIn:
for line in fIn:
qid, pos_id, neg_id = line.strip().split()
if qid not in dev_samples and len(dev_samples) < num_dev_queries:
dev_samples[qid] = {'query': queries[qid], 'positive': set(), 'negative': set()}
if qid in dev_samples:
dev_samples[qid]['positive'].add(corpus[pos_id])
if len(dev_samples[qid]['negative']) < num_max_dev_negatives:
dev_samples[qid]['negative'].add(corpus[neg_id])
# Read our training file
train_filepath = os.path.join(data_folder, 'msmarco-qidpidtriples.rnd-shuf.train.tsv.gz')
if not os.path.exists(train_filepath):
logging.info("Download "+os.path.basename(train_filepath))
util.http_get('https://sbert.net/datasets/msmarco-qidpidtriples.rnd-shuf.train.tsv.gz', train_filepath)
cnt = 0
with gzip.open(train_filepath, 'rt') as fIn:
for line in tqdm.tqdm(fIn, unit_scale=True):
qid, pos_id, neg_id = line.strip().split()
if qid in dev_samples:
continue
query = queries[qid]
if (cnt % (pos_neg_ration+1)) == 0:
passage = corpus[pos_id]
label = 1
else:
passage = corpus[neg_id]
label = 0
train_samples.append(InputExample(texts=[query, passage], label=label))
cnt += 1
if cnt >= max_train_samples:
break
# We create a DataLoader to load our train samples
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
# It performs a classification task and measures scores like F1 (finding relevant passages) and Average Precision
evaluator = CERerankingEvaluator(dev_samples, name='train-eval')
# Configure the training
warmup_steps = 5000
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=10000,
warmup_steps=warmup_steps,
output_path=model_save_path,
use_amp=True)
#Save latest model
model.save(model_save_path+'-latest') |
import sys
from google_utils import *
from ping import ping_pool
import configparser
from time import sleep
def read_config(path):
    """Read the INI configuration file at *path* into a plain dict.

    Args:
        path: Path to the configuration file (expects a [main] section).

    Returns:
        A dict with the [main] section values, or 0 when *path* does not
        exist. (Fix: the original contained the typo ``retun = 0``, which
        silently assigned a throwaway variable and fell through to
        configparser, crashing later with NoSectionError. Callers should
        check for a falsy result.)
    """
    if not os.path.exists(path):
        return 0
    config_file = configparser.ConfigParser()
    config_file.read(path)
    # Flatten the [main] section into the keys the rest of the script expects.
    config = {
        'scopes': config_file.get('main', 'scopes'),
        'credentials_filename': config_file.get('main', 'credentials_filename'),
        'token_filename': config_file.get('main', 'token_filename'),
        'spreadsheet_id': config_file.get('main', 'spreadsheet_id'),
        'sheet_number': config_file.get('main', 'sheet_number'),
        'first_ip_cell': config_file.get('main', 'first_ip_cell'),
        'first_ms_cell': config_file.get('main', 'first_ms_cell'),
        'paralel_work': config_file.get('main', 'paralel_work'),
    }
    return config
def main(config):
    """Run one ping/update cycle against the configured Google Sheet.

    Reads the host column from the sheet, pings every host in parallel, and
    writes the latencies back, refreshing conditional formatting first.

    Args:
        config: Dict produced by read_config() with the [main] settings.
    """
    scopes = config['scopes']
    credentials_filename = config['credentials_filename']
    token_filename = config['token_filename']
    spreadsheet_id = config['spreadsheet_id']
    sheet_number = int(config['sheet_number'])
    first_ip_cell = config['first_ip_cell']
    first_ms_cell = config['first_ms_cell']
    paralel_work = int(config['paralel_work'])

    # Authenticate against the Google Sheets API (helpers come from google_utils).
    spreadsheets = auth(
        scopes,
        credentials_filename,
        token_filename
    )
    sheet_info = get_sheets_info(spreadsheets, spreadsheet_id, sheet_number)
    # Reset conditional formatting before re-applying it to the ms column.
    del_all_conditional_format(
        spreadsheets, spreadsheet_id, sheet_info)
    addFormatting(spreadsheets, spreadsheet_id, sheet_info, first_ms_cell)
    host_list = get_host_list(
        spreadsheets,
        spreadsheet_id,
        first_ip_cell,
        sheet_info
    )
    # Ping all hosts concurrently, then push the latencies back to the sheet.
    ping_results = ping_pool(host_list, paralel_work)
    push_update_values(
        spreadsheets,
        spreadsheet_id,
        first_ms_cell,
        sheet_info,
        ping_results,
    )
if __name__ == '__main__':
    config = read_config('config.ini')
    # "-d <seconds>" runs forever, refreshing every <seconds>; otherwise run once.
    if len(sys.argv) > 2 and sys.argv[1] == '-d':
        while True:
            main(config)
            sleep(int(sys.argv[2]))
    else:
        main(config)
|
#!usr/bin/python
from math import exp
from . import types
import typing
class FungicideEffect(object):
    """A single saturating dose-response effect.

    The effect follows ``maxEffect * (1 - exp(-curvatureFactor * dose))``,
    approaching ``maxEffect`` asymptotically as the dose grows.
    """

    def __init__(self, maxEffect: types.Number, curvatureFactor: types.Number) -> None:
        self.maxEffect = maxEffect              # asymptotic effect at infinite dose
        self.curvatureFactor = curvatureFactor  # steepness of the response curve

    def effect(self, dose: types.Number) -> types.Number:
        """Return the effect produced by *dose*."""
        saturation = 1 - exp(-self.curvatureFactor * dose)
        return self.maxEffect * saturation
class Fungicide(object):
    """A fungicide with a decay rate and a set of named dose-response effects."""

    def __init__(self, decayRate: float, **effects: FungicideEffect) -> None:
        self.decayRate = decayRate
        self.effects = dict(effects)  # copy of the keyword mapping of named effects

    def effect(self, dose: types.Number, effect: str) -> types.Number:
        """Return the named effect at *dose*, or 0 if this fungicide lacks it."""
        if effect not in self.effects:
            return 0
        else:
            return self.effects[effect].effect(dose)

    @staticmethod
    def combineEffects(fungicides: typing.List["Fungicide"], doses: typing.List[types.Number], effect: str):
        """Combine several fungicides multiplicatively (independent action).

        The combined effect is 1 minus the product of each fungicide's
        non-effect complement at its corresponding dose.
        """
        complement = 1
        for fungicide, dose in zip(fungicides, doses):
            complement *= (1 - fungicide.effect(dose, effect))
        return 1 - complement

    def __eq__(self, other: object) -> bool:
        # Fix: the original accessed other.__dict__ unconditionally, raising
        # AttributeError when compared against e.g. an int. Returning
        # NotImplemented lets Python fall back to its default handling.
        if not isinstance(other, Fungicide):
            return NotImplemented
        return self.__dict__ == other.__dict__
|
import numpy as np
class CARTTree:
    """Node of a CART decision tree.

    Internal nodes carry a split (col, value) and two branches; leaves carry
    ``results`` (class counts) and optionally the raw ``data`` they cover.
    """

    def __init__(self, value=None, trueBranch=None, falseBranch=None, results=None, col=-1, summary=None, data=None):
        self.value = value              # split threshold / category (internal nodes)
        self.trueBranch = trueBranch    # branch taken when the split test passes
        self.falseBranch = falseBranch  # branch taken when it fails
        self.results = results          # class counts (leaf nodes only)
        self.col = col                  # index of the column tested by this node
        self.summary = summary          # diagnostics (impurity, sample count)
        self.data = data                # rows covered by a leaf (used by pruning)

    def __str__(self):
        # Fix: __str__ must return the description rather than printing it as
        # a side effect and returning "" (the original behaviour).
        return "{} {}\n{}\n{}".format(self.col, self.value, self.results, self.summary)
def gini(dataset):
    """Return the Gini impurity of *dataset*.

    The class label of each row is its last element. Impurity is
    ``1 - sum(p_i ** 2)`` over the class shares ``p_i``.
    """
    total = len(dataset)
    # Tally how often each class label occurs.
    counts = {}
    for row in dataset:
        label = row[-1]
        counts[label] = counts.get(label, 0) + 1
    concentration = 0.0
    for freq in counts.values():
        share = freq / total
        concentration += share * share
    return 1 - concentration
def chooseSplitData(dataset, value, column):
    """Split *dataset* into two row lists on *column*.

    Numeric *value*: rows with ``row[column] >= value`` go left, the rest
    right. Any other type: rows with ``row[column] == value`` go left.

    Returns:
        (left_rows, right_rows)
    """
    if isinstance(value, (int, float)):
        passes = lambda row: row[column] >= value
    else:
        passes = lambda row: row[column] == value
    left, right = [], []
    for row in dataset:
        (left if passes(row) else right).append(row)
    return left, right
def buildTree(rows):
    """Recursively build a CART decision tree from *rows*.

    Every (column, value) pair is tried as a split; the one with the largest
    Gini gain wins. Recursion stops when no split yields a positive gain, in
    which case the node becomes a leaf holding the class counts of its rows.
    """
    currentGain = gini(rows)
    column_lenght = len(rows[0])
    rows_length = len(rows)
    best_gain = 0.0
    best_value = None
    best_set = None

    # Evaluate each candidate split (the last column is the class label).
    for col in range(column_lenght - 1):
        col_value_set = set([x[col] for x in rows])
        for value in col_value_set:
            list1, list2 = chooseSplitData(rows, value, col)
            p = len(list1) / rows_length
            # Gain = impurity before split minus weighted impurity after.
            gain = currentGain - p * gini(list1) - (1 - p) * gini(list2)
            if gain > best_gain:
                best_gain = gain
                best_value = (col, value)
                best_set = (list1, list2)

    dcY = {'impurity': '%.3f' % currentGain, 'sample': '%d' % rows_length}

    # Recurse while a split still improves purity; otherwise emit a leaf.
    if best_gain > 0:
        trueBranch = buildTree(best_set[0])
        falseBranch = buildTree(best_set[1])
        return CARTTree(col=best_value[0], value=best_value[1], trueBranch=trueBranch, falseBranch=falseBranch, summary=dcY)
    else:
        return CARTTree(results=calculateDiffCount(rows), summary=dcY, data=rows)
def calculateDiffCount(datas):
    """Count occurrences of each class label (last column) in *datas*."""
    tally = {}
    for record in datas:
        label = record[-1]
        tally[label] = tally.get(label, 0) + 1
    return tally
def pruneTree(tree, miniGain):
    """Post-prune *tree* in place, merging leaf pairs whose split gain is weak.

    Args:
        tree: Root CARTTree node (modified in place).
        miniGain: Minimum Gini gain a split must provide to survive.
    """
    # Descend into internal children first (leaves have non-None results).
    # Fix: compare against None with `is`/`is not`, not `==`/`!=`.
    if tree.trueBranch.results is None:
        pruneTree(tree.trueBranch, miniGain)
    if tree.falseBranch.results is None:
        pruneTree(tree.falseBranch, miniGain)
    # When both children are leaves, re-evaluate the split that produced them.
    if tree.trueBranch.results is not None and tree.falseBranch.results is not None:
        len1 = len(tree.trueBranch.data)
        len2 = len(tree.falseBranch.data)
        p = float(len1) / (len1 + len2)
        # Gain of keeping the split versus merging both leaves into one.
        gain = gini(tree.trueBranch.data + tree.falseBranch.data) - p * gini(tree.trueBranch.data) - (1 - p) * gini(tree.falseBranch.data)
        if gain < miniGain:
            # Collapse: this node becomes a leaf covering both children's rows.
            tree.data = tree.trueBranch.data + tree.falseBranch.data
            tree.results = calculateDiffCount(tree.data)
            tree.trueBranch = None
            tree.falseBranch = None
def classify(data, tree):
    """Classify one sample *data* by walking *tree* down to a leaf.

    Numeric columns branch on ``>=``, all other types on equality.

    Returns:
        The class-count dict stored at the reached leaf.
    """
    # Fix: compare against None with `is not`, not `!=`.
    if tree.results is not None:
        return tree.results
    v = data[tree.col]
    if isinstance(v, (int, float)):
        branch = tree.trueBranch if v >= tree.value else tree.falseBranch
    else:
        branch = tree.trueBranch if v == tree.value else tree.falseBranch
    return classify(data, branch)
def loadCSV(path="name_datas.csv"):
    """Load a CSV file into a list of typed rows.

    The first line is treated as a header and dropped; each remaining cell is
    converted to int or float where possible, otherwise kept as a string.

    Args:
        path: CSV file to read. Generalized from the previously hard-coded
            "name_datas.csv", which remains the default for compatibility.

    Returns:
        A list of rows, each a list of int/float/str values.
    """
    def convertTypes(s):
        # Try numeric conversion first; fall back to the raw (stripped) text.
        s = s.strip()
        try:
            return float(s) if '.' in s else int(s)
        except ValueError:
            return s

    data = np.loadtxt(path, dtype='str', delimiter=',')
    data = data[1:, :]  # drop the header row
    return [[convertTypes(item) for item in row] for row in data]
# Demo: build, prune, and query the tree
if __name__ == '__main__':
    # Build a tree from the bundled CSV, prune weak splits, then classify one sample.
    dataSet = loadCSV()
    decisionTree = buildTree(dataSet)
    pruneTree(decisionTree, 0.4)
    # Sample feature row to classify — looks iris-like; confirm against the CSV schema.
    pre_name = [5.1, 3.5, 1.4, 0.2]
    print(classify(pre_name, decisionTree))
|
"""
test gain and noise stability
"""
import sys
import azcam
def test_gain_stability(cycles=5):
    """Repeatedly run the gain calibration to check its stability.

    Args:
        cycles: Number of repetitions. Strings are accepted so the value can
            come straight from sys.argv.
    """
    # Fix: when invoked from the command line, cycles arrives as a string and
    # range() would raise TypeError — coerce to int first.
    cycles = int(cycles)
    for i in range(cycles):
        print(f"Testing cycle: {i + 1}/{cycles}")
        # azcam.db.exposure.set_roi(-1, -1, 1, 500, 1, 1)
        azcam.db.gain.find()
if __name__ == "__main__":
args = sys.argv[1:]
test_gain_stability(*args)
|
#!/usr/bin/env python3
"""
Created on 24 Mar 2021
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from scs_host.sys.host import Host
# --------------------------------------------------------------------------------------------------------------------
# Query the host's SIM information and print it.
# NOTE(review): Host.sim() semantics are defined in scs_host — confirm the output format there.
sim = Host.sim()
print(sim)
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.basics.moviefile Creating a movie from a sequence of stills
#
# An instance of the MovieFile class in this module allows creating a movie file from a sequence of images.
# -----------------------------------------------------------------
# Import standard modules
import os.path
import subprocess
# -----------------------------------------------------------------
# MovieFile class
# -----------------------------------------------------------------
# TODO: Avoid mencoder dependency (which is difficult to install on Mac) by using matplotlib's MovieWriter
# (http://stackoverflow.com/questions/4092927/generating-movie-from-python-without-saving-individual-frames-to-files)
# or something else ?
## An instance of the MovieFile class allows creating a movie file from a sequence of images. Use the constructor
# to specify the filename and the movie format, insert the images one at a time with the add() function, and finally
# write the movie header information to the file with the close() function.
#
# This class requires MEncoder to be installed as \c /usr/local/bin/mencoder (the location is hard-coded).
# See \ref InstallMacMEncoder in the installation guide.
#
# This class can generate movies in two distinct formats:
# - MPEG-1 codec in \c .mpg container: this format is compatible with most movie players, including Windows
# Media Player, Apple's QuickTime Player, and the free opensource player VLC. However the files are typically
# several times larger than those created with more modern compression technologies such as MPEG-4. Also,
# in this format the aspect ratio of the movie frame is fixed at 4/3 and the frame rate is fixed at 24 fps.
# - MPEG-4 codec in \c .mov container: this format is playable with Apple's QuickTime Player (standard on Mac OS X
# and available as a free download on Windows) and the free opensource player VLC. This format uses a modern
# compression technology and it supports any aspect ratio and frame rate (probably within certain limits).
#
class MovieFile:
    """Create a movie file from a sequence of raw RGBA frames piped to MEncoder.

    Construct with the target filename (.mpg or .mov decides the format), feed
    frames one at a time with add(), and finish with close() — close() MUST be
    called for the movie to be playable. Requires MEncoder at
    /usr/local/bin/mencoder (hard-coded path).
    """

    def __init__(self, filepath, shape=(800,600), rate=24):
        """Launch mencoder writing to *filepath* (any existing file is replaced).

        Args:
            filepath: Output path ending in ".mpg" or ".mov".
            shape: Frame size in pixels (x, y); .mpg requires a 4/3 ratio.
            rate: Frames per second; .mpg requires 24.
        """
        # verify restrictions on arguments
        assert filepath.endswith((".mov",".mpg"))
        if filepath.endswith(".mpg"):
            assert 3*shape[0] == 4*shape[1]   # .mpg demands 4/3 aspect ratio
            assert rate == 24                 # .mpg demands 24 fps
        # remember total number of bytes in pixel buffer for a single frame encoded as RGBA
        self.buflen = 4*shape[0]*shape[1]
        # ensure that we have access rights to create the output file (since we ignore any messages from mencoder)
        filepath = os.path.expanduser(filepath)
        open(filepath,'w').close()
        # construct the first part of the command line for raw video input (identical for both output formats)
        # note: type '$ mencoder -vf format=fmt=help' for a list of valid pixel formats / byte orderings
        cmdline = [ '/usr/local/bin/mencoder', '/dev/stdin', '-demuxer', 'rawvideo',
                    '-rawvideo', 'w=%i:h=%i'%shape + ':fps=%i:format=rgba'%rate,
                    '-really-quiet', '-o', filepath ]
        # add the appropriate options for .mov output
        if filepath.endswith(".mov"):
            cmdline += [
                '-ovc', 'lavc',
                '-lavcopts', 'vcodec=mpeg4:vbitrate=10000:mbd=2:cmp=2:subcmp=2:trell=yes:v4mv=yes:aic=2:vglobal=1',
                '-ffourcc', 'mp4v', '-of', 'lavf',
                '-lavfopts', 'format=mp4:i_certify_that_my_video_stream_does_not_use_b_frames'
                ]
        # set the appropriate options for .mpg output
        if filepath.endswith(".mpg"):
            cmdline += [
                '-ovc', 'lavc',
                '-lavcopts', 'vcodec=mpeg1video:vbitrate=1152:keyint=15:mbd=2:aspect=4/3',
                '-of', 'mpeg',
                '-mpegopts', 'format=mpeg1:tsaf:muxrate=2000'
                ]
        # launch mencoder; pipe the input from this process and discard its output.
        # Fix: use subprocess.DEVNULL instead of open(os.path.devnull, 'w'),
        # which leaked a file descriptor that was never closed.
        self.p = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
                                  stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)

    def add(self, frame):
        """Write the pixel data for a single movie frame to mencoder.

        Each pixel must be encoded as RGBA (one byte per R,G,B channel plus one
        ignored byte); len(frame) must equal 4*shape[0]*shape[1].
        NOTE(review): on Python 3 the frame must be a bytes-like object.
        """
        assert len(frame) == self.buflen
        self.p.stdin.write(frame)
        #self.p.communicate(input=frame) better? (is recommended)

    def close(self):
        """Finish the movie: close mencoder's stdin so it writes the header.

        Must be called for the movie file to be playable.
        """
        self.p.stdin.close()
        # Fix: reap the child so no zombie process is left behind and the
        # output file is fully flushed before we return.
        self.p.wait()
# -----------------------------------------------------------------
|
__version__ = '0.8.2'
from kson.read.load import load # noqa: F401
from kson.read.load import loads # noqa: F401
from kson.write.writer import dump # noqa: F401
from kson.write.writer import dumps # noqa: F401
|
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from ...utils.api_object import APIObject
from ...utils.types import MISSING
if TYPE_CHECKING:
from ...utils import APINullable, Snowflake
@dataclass
class RoleTags(APIObject):
    """
    Special tags/flags which have been defined for a role.

    :param bot_id:
        The id of the bot this role belongs to.
        (the role got created by adding the bot with this id)
    :param integration_id:
        The id of the integration this role belongs to.
        (the role got created by adding an integration with this id)
    :param premium_subscriber:
        Whether this is the guild's premium subscriber role or not.
    """
    bot_id: APINullable[Snowflake] = MISSING
    integration_id: APINullable[Snowflake] = MISSING
    premium_subscriber: APINullable[bool] = MISSING
@dataclass
class Role(APIObject):
    """
    Represents a Discord guild/server role.

    :param color:
        integer representation of hexadecimal color code
    :param hoist:
        if this role is pinned in the user listing
    :param id:
        role id
    :param managed:
        whether this role is managed by an integration
    :param mentionable:
        whether this role is mentionable
    :param name:
        role name
    :param permissions:
        permission bit set
    :param position:
        position of this role
    :param tags:
        the tags this role has
    :param mention:
        structures a string to mention the role
    """

    color: int
    hoist: bool
    id: Snowflake
    managed: bool
    mentionable: bool
    name: str
    permissions: str
    position: int

    icon: APINullable[str] = MISSING
    unicode_emoji: APINullable[str] = MISSING
    tags: APINullable[RoleTags] = MISSING

    @property
    def mention(self):
        """Return the Discord mention string for this role."""
        return f"<@&{self.id}>"

    # TODO: Implement Caching
    @classmethod
    async def from_id(cls, client, guild_id: int, role_id: int) -> Role:
        """
        Fetch a role by id from a guild's role list.

        NOTE(review): implicitly returns None when no role matches role_id,
        despite the ``Role`` return annotation — callers must handle that.
        """
        roles: list = await client.http.get(f"/guilds/{guild_id}/roles")
        for role in roles:
            if int(role['id']) == role_id:
                return cls.from_dict(role)
|
import math
class const:
    """Mathematical / physical constants used by the geometry helpers."""
    pi=3.14159265358979323846  # NOTE: math.pi is already available via the import above
    G = 6.67e-11               # gravitational constant, m^3 kg^-1 s^-2
    c = 3e+8                   # speed of light, m/s (approximate value)
class Position2D:
    """Mutable 2-D point.

    Naming convention (kept for backward compatibility, including the
    "Multiple" spelling): methods WITHOUT a trailing underscore mutate in
    place and return self; methods WITH a trailing underscore return a new
    Position2D, leaving self untouched. For the scaling/division methods a
    zero (or omitted) factor leaves that axis unchanged.
    """

    def __init__(self, x=0, y=0, tuple_=None):
        self.x = x
        self.y = y
        if tuple_:
            # An (x, y) tuple, when given, overrides the scalar arguments.
            self.x = tuple_[0]
            self.y = tuple_[1]

    def Set(self, x=None, y=None):
        """Assign coordinates; None leaves a coordinate unchanged."""
        if x is not None:
            self.x = x
        if y is not None:
            self.y = y
        return self

    def Subtract(self, Position):
        """In-place subtraction of another position; returns self."""
        self.x -= Position.x
        self.y -= Position.y
        return self

    def Subtract_(self, Position):
        """Return a new position equal to self - Position.

        Fix: the original ADDED the coordinates (copy-paste from Add_).
        """
        return Position2D(self.x - Position.x, self.y - Position.y)

    def Add(self, Position):
        """In-place addition of another position; returns self."""
        self.x += Position.x
        self.y += Position.y
        return self

    def Add_(self, Position):
        """Return a new position equal to self + Position."""
        return Position2D(self.x + Position.x, self.y + Position.y)

    def Multiple(self, x=0, y=0):
        """In-place scale; a zero/omitted factor leaves that axis unchanged."""
        if x:
            self.x *= x
        if y:
            self.y *= y
        return self

    def Multiple_(self, x=0, y=0):
        """Return a scaled copy; a zero/omitted factor leaves that axis unchanged.

        Fix: the original returned 0 for any axis whose factor was omitted
        instead of the original coordinate.
        """
        new_x = self.x * x if x else self.x
        new_y = self.y * y if y else self.y
        return Position2D(new_x, new_y)

    def Divide(self, x=0, y=0):
        """In-place division; a zero/omitted divisor leaves that axis unchanged.

        Fix: this in-place variant was shadowed by a duplicate ``Divide``
        definition; the copy-returning variant is now ``Divide_``, matching
        the class naming convention.
        """
        if x:
            self.x /= x
        if y:
            self.y /= y
        return self

    def Divide_(self, x=0, y=0):
        """Return a divided copy; a zero/omitted divisor leaves that axis unchanged."""
        new_x = self.x / x if x else self.x
        new_y = self.y / y if y else self.y
        return Position2D(new_x, new_y)

    def GetTuple(self):
        """Return the coordinates as an (int, int) tuple (values truncated)."""
        return (int(self.x), int(self.y))

    def GetDistanceSquare(self, TargetPosition):
        """Squared Euclidean distance to TargetPosition."""
        return ((TargetPosition.x - self.x) ** 2 + (TargetPosition.y - self.y) ** 2)

    def GetDistance(self, TargetPosition):
        """Euclidean distance to TargetPosition."""
        return ((TargetPosition.x - self.x) ** 2 + (TargetPosition.y - self.y) ** 2) ** 0.5

    def GetTargetAngle(self, TargetPosition):
        """Angle (radians) of the vector from self to TargetPosition."""
        return math.atan2(
            TargetPosition.y - self.y,
            TargetPosition.x - self.x
        )
class Vector2D:
    """Mutable 2-D vector kept in both Cartesian (x, y) and polar (Value, Angle) form.

    Component-level mutations refresh the polar form via CalculateVA();
    polar-level mutations refresh the components via _project().
    """

    def __init__(self, angle=0, value=0):
        self.Angle = angle
        self.Value = value
        self._project()

    def _project(self):
        """Recompute the x/y components from the stored polar form."""
        self.x = self.Value * math.cos(self.Angle)
        self.y = self.Value * math.sin(self.Angle)

    def GetAngle(self):
        """Refresh and return the angle from the current components."""
        self.Angle = math.atan2(self.y, self.x)
        return self.Angle

    def GetTuple(self):
        """Return the components truncated to ints."""
        return (int(self.x), int(self.y))

    def SetValue(self, value):
        """Set the magnitude, keeping the current angle."""
        self.Value = value
        self._project()

    def SetAngle(self, angle):
        """Set the angle, keeping the current magnitude."""
        self.Angle = angle
        self._project()

    def CalculateVA(self):
        """Refresh the polar form (Value, Angle) from the components."""
        self.Value = math.hypot(self.x, self.y)
        self.Angle = math.atan2(self.y, self.x)

    def SplitVectorSet(self, value, angle):
        """Replace the components with those of the polar vector (value, angle)."""
        self.x = value * math.cos(angle)
        self.y = value * math.sin(angle)
        self.CalculateVA()

    def SplitVectorAdd(self, value, angle):
        """Add the polar vector (value, angle) to this one."""
        self.Add_(value * math.cos(angle), value * math.sin(angle))

    def SplitVectorSub(self, value, angle):
        """Subtract the polar vector (value, angle) from this one."""
        self.Substract_(value * math.cos(angle), value * math.sin(angle))

    def Add_(self, x=0, y=0):
        """Add raw components; returns self."""
        self.x += x
        self.y += y
        self.CalculateVA()
        return self

    def Add(self, vector):
        """Add another vector component-wise; returns self."""
        return self.Add_(vector.x, vector.y)

    def Substract(self, vector):
        """Subtract another vector component-wise; returns self."""
        return self.Substract_(vector.x, vector.y)

    def Substract_(self, x=0, y=0):
        """Subtract raw components; returns self."""
        self.x -= x
        self.y -= y
        self.CalculateVA()
        return self

    def Multiply_(self, x=1, y=1):
        """Scale the components; returns self."""
        self.x *= x
        self.y *= y
        self.CalculateVA()
        return self

    def Multiply(self, vector):
        """Component-wise multiply by another vector; returns self."""
        return self.Multiply_(vector.x, vector.y)

    def Divide_(self, x=1, y=1):
        """Divide the components; returns self."""
        self.x /= x
        self.y /= y
        self.CalculateVA()
        return self

    def Divide(self, vector):
        """Component-wise divide by another vector; returns self."""
        return self.Divide_(vector.x, vector.y)
class Color:
    """Simple RGB colour container with 0-255 channel values."""

    def __init__(self, r=0, g=0, b=0):
        self.Red = r
        self.Green = g
        self.Blue = b

    def GetTuple(self):
        """Return the colour as an (R, G, B) tuple."""
        return (self.Red, self.Green, self.Blue)

    def setHex(self, value):
        """Set the colour from a hex string such as '#rrggbb'; returns self."""
        self.Red, self.Green, self.Blue = self.hexToRGB(value)
        return self

    def hexToRGB(self, value):
        """Convert '#rrggbb' (any '#' prefix stripped) to an (R, G, B) tuple."""
        digits = value.lstrip('#')
        step = len(digits) // 3
        return tuple(int(digits[i:i + step], 16)
                     for i in range(0, len(digits), step))
|
import logging
class BaseAction():
    """Common base class for the action types below."""

    def __init__(self):
        pass


class Event(BaseAction):
    """Marker type for an event action."""
    pass


class Offer(BaseAction):
    """Marker type for an offer action."""
    pass


class Order(BaseAction):
    """Marker type for an order action."""
    pass
|
import datetime, requests, os, json, urllib.request
from io import BytesIO
import google_streetview.api
from PIL import Image
# for signing urls
import hashlib, hmac, base64
from urllib3.util import parse_url
import gsv_depth_scraper.geom
now = datetime.datetime.now()
# Tile endpoint for full panoramas; the trailing microsecond acts as a cache-buster.
PANO_URL = 'http://maps.google.com/cbk?output=tile&panoid={panoid}&zoom={z}&x={x}&y={y}&key={key}&' + str(now.microsecond)
#STAT_URL = 'https://maps.googleapis.com/maps/api/staticmap?center=Berkeley,CA&zoom=14&size=400x400&key={key}'
STREET_URL = 'https://maps.googleapis.com/maps/api/streetview?size={size}&location={location}&heading={heading}&pitch=-{pitch}&key={key}'

# Default Street View Static API query parameters.
SIZE = '640x640'
HEADING = '0;90;180;270'   # four compass headings per location
FOV = '45'
PITCH = '0'
GSV_TILEDIM = 512          # pixel edge length of one GSV tile
GOOG_COPYRIGHT = "Google"  # used to filter out third-party panoramas

#api_sign_secret = "LonEqRR9GhMC4S8cyJ5E0OvXCpg="
#b64sign = "pLa6oGQjAn17ijaPcx41wRZsfSQ="
def load_panos_and_package_to_zip(pth_wrk, zipobj, fmt, limit=False):
    """Collect downloaded panoramas from *pth_wrk*, archive and load them.

    Only panoramas whose matching depth JSON ("<panoid>.json") exists in the
    same directory are kept; the rest are reported and skipped.

    Args:
        pth_wrk: Working directory holding "<panoid>.<fmt>" images and JSON files.
        zipobj: Open ZipFile that receives the images under "pano_img/".
        fmt: Image file extension to look for (e.g. "jpg").
        limit: If truthy, process at most this many panorama files.

    Returns:
        (panoids, pano_imgs): parallel lists of panorama ids and PIL images.
    """
    panoids, pano_imgs = [], []
    pano_fnames = [file for file in os.listdir(pth_wrk) if file.endswith(fmt)]
    dpth_fnames = [file for file in os.listdir(pth_wrk) if file.endswith("json")]
    if limit: pano_fnames = pano_fnames[:limit]
    for pano_fname in pano_fnames:
        panoid = os.path.splitext(pano_fname)[0]
        if "{}.json".format(panoid) not in dpth_fnames:
            print("MISSING JSON FILE\nCould not find {} in working directory {}.\nThis pano will not be archived.".format("{}.json".format(panoid),pth_wrk))
            continue
        zipobj.write(os.path.join(pth_wrk,pano_fname), os.path.join("pano_img",pano_fname)) # write pano image to zip archive
        # Load pano image to memory. NOTE(review): PIL opens lazily and keeps
        # the file handle alive until the image data is consumed.
        pano_imgs.append(Image.open(os.path.join(pth_wrk,pano_fname)))
        panoids.append(panoid)
    return panoids, pano_imgs
def panoid_to_img(panoid, api_key, zoom, size_img):
    """Download and stitch all tiles of a panorama into one PIL image.

    Args:
        panoid: Google Street View panorama id.
        api_key: API key passed through to the tile endpoint.
        zoom: Zoom level; the tile grid is 2**zoom wide and 2**(zoom-1) high.
        size_img: (width, height) of the full panorama, used to pick tile size.

    Returns:
        The stitched PIL image (mirrored left-right), or False on failure.
    """
    w, h = 2**zoom, 2**(zoom-1)
    # Tile edge length is inferred from the advertised panorama width.
    dim = False
    if size_img[0] == 13312: dim = 416
    if size_img[0] == 16384: dim = 512
    if not dim:
        print("!!!! THIS PANO IS A STRANGE DIMENSION {}".format(panoid))
        print("zoom:{}\t w,h: {}x{} \t image_size:{}x{}".format(zoom,w,h,size_img[0],size_img[1]))
        return False
    img = Image.new("RGB", (w*dim, h*dim), "red")
    try:
        for y in range(h):
            for x in range(w):
                url_pano = PANO_URL.format(panoid=panoid, z=zoom, x=x, y=y, key=api_key)
                response = requests.get(url_pano)
                img_tile = Image.open(BytesIO(response.content))
                # NOTE(review): tiles are pasted at GSV_TILEDIM (512) spacing
                # even when dim == 416 — confirm this offset is intended.
                img.paste(img_tile, (GSV_TILEDIM*x, GSV_TILEDIM*y))
    # Fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # narrow it to Exception so the process stays interruptible.
    except Exception:
        print("!!!! FAILED TO DOWNLOAD PANO for {}".format(panoid))
        return False
    return img.transpose(Image.FLIP_LEFT_RIGHT)
""" def gpts_to_panoids(gjpts, api_key):
locstr = gsv_depth_scraper.geom.concat_gpts_to_goog_str(gjpts)
apiargs = {
'location': locstr,
'key': api_key,
'size': SIZE,
'heading': HEADING,
'fov': FOV,
'pitch': PITCH
}
api_list = google_streetview.helpers.api_list(apiargs)
results = google_streetview.api.results(api_list)
panoids = set()
for meta in results.metadata:
if not meta['status'] == "OK":
print("NO PANORAMA FOUND FOR GIVEN LATLNG. status: {}".format(meta['status']))
continue
if 'copyright' not in meta:
print("Found a panorama with no copyright tag. skipping.")
continue
else:
if not ( meta['copyright'].split()[-1].lower() == GOOG_COPYRIGHT.lower() ):
print("Found a non-google copyright ({}). skipping {}.".format(meta['copyright'], meta['pano_id']))
continue
panoids.add( meta['pano_id'] )
return list(panoids) """
def gpts_to_images(gjpts, api_key, download_dir='C:\\Users\\omarl\\Desktop\\FourthYearProject\\scrapedPanos'):
    """Download Street View images for the given geojson points.

    gjpts: geojson points, concatenated into one location string.
    api_key: Google Street View API key.
    download_dir: destination folder for the downloaded images. Defaults to
        the previously hard-coded machine-specific path for backward
        compatibility; pass an explicit path on any other machine.
    Returns an empty list (no pano ids are collected here; kept so callers
    expecting a list keep working).
    """
    locstr = gsv_depth_scraper.geom.concat_gpts_to_goog_str(gjpts)
    apiargs = {
        'location': locstr,
        'key': api_key,
        'size': SIZE,
        'heading': HEADING,
        'fov': FOV,
        'pitch': PITCH
    }
    api_list = google_streetview.helpers.api_list(apiargs)
    results = google_streetview.api.results(api_list)
    results.download_links(download_dir)
    return []
# from https://gist.github.com/christ0pher/f2c4748a09ed31cf71a8
# NOT USED
def sign_url(input_url=None, client_id=None, client_secret=None):
    """Sign a request URL with a Crypto Key (HMAC-SHA1).

    Args:
        input_url - The URL to sign
        client_id - Your Client ID
        client_secret - Your Crypto Key (URL-safe base64 encoded)
    Returns:
        The signed request URL, or None when any argument is missing.
    """
    # All three pieces are required; bail out early otherwise.
    if not (input_url and client_id and client_secret):
        return None

    # Append the client id, then sign only the path + query portion.
    url = parse_url("{}&client={}".format(input_url, client_id))
    url_to_sign = "{}?{}".format(url.path, url.query)

    # The crypto key arrives URL-safe base64 encoded; HMAC needs raw bytes.
    raw_key = base64.urlsafe_b64decode(client_secret)
    digest = hmac.new(raw_key, url_to_sign.encode(), hashlib.sha1).digest()

    # The binary signature goes back into the URL as URL-safe base64.
    encoded_signature = base64.urlsafe_b64encode(digest).decode()
    original_url = "{}://{}{}?{}".format(url.scheme, url.netloc, url.path, url.query)
    return "{}&signature={}".format(original_url, encoded_signature)
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=no-self-use
# pylint: disable=protected-access
#############################################################
# Copyright (c) 2020-2020 Maurice Karrenbrock #
# #
# This software is open-source and is distributed under the #
# BSD 3-Clause "New" or "Revised" License #
#############################################################
import numpy as np
from simtk import unit
import FSDAMGromacs.parse as parse
class TestGromacsParseWorkProfile():
    """Unit tests for parse.GromacsParseWorkProfile (pytest-mock based)."""
    def test_works_parse(self, mocker):
        """parse() rescales lambda onto [0, 1] and converts kJ/mol -> kcal/mol."""
        parsed_stuff = [[0., -1], [1., -2.], [2., -3.], [3., -4.]]
        expected_output = [[0., 1. / 3., 2. * 1. / 3., 3 * 1. / 3.],
                           [-1., -2., -3., -4.]]
        parsed_stuff = np.array(parsed_stuff)
        expected_output = np.array(expected_output)
        kjmol_kcalmol_conversion = unit.kilojoules_per_mole.conversion_factor_to(
            unit.kilocalories_per_mole)  # 0.23901
        expected_output[
            1, :] = expected_output[1, :] * kjmol_kcalmol_conversion
        mocked_parse = mocker.patch(
            'FSDAMGromacs.parse.parse_big_gromacs_xvg_files',
            return_value=parsed_stuff)
        instance = parse.GromacsParseWorkProfile('gromacs')
        output = instance.parse('dhdl.xvg',
                                starting_lambda=0.,
                                ending_lambda=1.)
        mocked_parse.assert_called_once_with('dhdl.xvg')
        # Fix: call the numpy assertion directly -- it raises on mismatch;
        # wrapping it in `assert ... is None` was redundant.
        np.testing.assert_allclose(output, expected_output)
class TestGromacsParsePullDistances():
    """Unit tests for parse.GromacsParsePullDistances (pytest-mock based)."""
    def test_parse(self, mocker):
        """parse() returns one nm -> angstrom (x10) scaled array per pull group."""
        # Header of a gromacs pullx.xvg file; the legend lines determine how
        # many pull groups (here 2) the parser must detect.
        parsed_list = [
            '# gmx mdrun is part of G R O M A C S:',
            '#',
            '# GROningen Mixture of Alchemy and Childrens\' Stories',
            '#',
            '@ title "Pull COM"',
            '@ xaxis label "Time (ps)"',
            '@ yaxis label "Position (nm)"',
            '@TYPE xy',
            '@ view 0.15, 0.15, 0.75, 0.85',
            '@ legend on',
            '@ legend box on',
            '@ legend loctype view',
            '@ legend 0.78, 0.8',
            '@ legend length 2',
            '@ s0 legend "1"',
            '@ s1 legend "1 g 1 X"',
            '@ s2 legend "1 g 1 Y"',
            '@ s3 legend "1 g 1 Z"',
            '@ s4 legend "1 g 2 X"',
            '@ s5 legend "1 g 2 Y"',
            '@ s6 legend "1 g 2 Z"',
            '@ s7 legend "2"',
            '@ s8 legend "2 g 1 X"',
            '@ s9 legend "2 g 1 Y"',
            '@ s10 legend "2 g 1 Z"',
            '@ s11 legend "2 g 2 X"',
            '@ s12 legend "2 g 2 Y"',
            '@ s13 legend "2 g 2 Z"',
            ' 0.0 1.0 2.0 3.0 4.0 5.0 6.0 7.0 8.0 9.0 10.0 11.0 12.0 13.0 14.',  # pylint: disable=line-too-long
            '15.0 16.0 17.0 18.0 19.0 20.0 21.0 22.0 23.0 24.0 25.0 26.0 27.0 28.0 29.',  # pylint: disable=line-too-long
            '30.0 31.0 32.0 33.0 34.0 35.0 36.0 37.0 38.0 39.0 40.0 41.0 42.0 43.0 44.',  # pylint: disable=line-too-long
            '45.0 46.0 47.0 48.0 49.0 50.0 51.0 52.0 53.0 54.0 55.0 56.0 57.0 58.0 59.'  # pylint: disable=line-too-long
        ]
        # Numeric payload the mocked low-level parser hands back.
        parsed_array = [[
            0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14.
        ],
                        [
                            15., 16., 17., 18., 19., 20., 21., 22., 23., 24.,
                            25., 26., 27., 28., 29.
                        ],
                        [
                            30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
                            40., 41., 42., 43., 44.
                        ],
                        [
                            45., 46., 47., 48., 49., 50., 51., 52., 53., 54.,
                            55., 56., 57., 58., 59.
                        ]]
        parsed_array = np.array(parsed_array)
        mocked_parse = mocker.patch(
            'FSDAMGromacs.parse.parse_big_gromacs_xvg_files',
            return_value=parsed_array)
        m_list = mocker.patch('FSDAMGromacs.parse.open')
        m_list.return_value.__enter__.return_value = parsed_list
        instance = parse.GromacsParsePullDistances('gromacs')
        output = instance.parse('dummy_pullx.xvg')
        assert isinstance(output, dict)
        assert tuple(output.keys()) == (1, 2)
        # Fix: call the numpy assertions directly -- they raise on mismatch;
        # wrapping them in `assert ... is None` was redundant.
        np.testing.assert_allclose(output[1], parsed_array[:, 1] * 10.)
        np.testing.assert_allclose(output[2], parsed_array[:, 8] * 10.)
        mocked_parse.assert_called_once_with('dummy_pullx.xvg', separator='\t')
        m_list.assert_called_once()
class Testparse_big_gromacs_xvg_files():
    """Tests for the parse_big_gromacs_xvg_files helper."""
    def test_works_default(self, mocker):
        """With defaults the 7-line xvg header is skipped and the pandas
        result is converted to a numpy array."""
        xvg_lines = [
            '# gmx mdrun is part of G R O M A C S:',
            '#',
            '# GROningen Mixture of Alchemy and Childrens\' Stories',
            '#',
            '@ title "Pull COM"',
            '@ xaxis label "Time (ps)"',
            '@ yaxis label "Position (nm)"',
            ' 0.0 1.0 2.0 3.0 4.0 5.0 6.0 7.0 8.0 9.0 10.0 11.0 12.0 13.0 14.',  # pylint: disable=line-too-long
            '15.0 16.0 17.0 18.0 19.0 20.0 21.0 22.0 23.0 24.0 25.0 26.0 27.0 28.0 29.',  # pylint: disable=line-too-long
            '30.0 31.0 32.0 33.0 34.0 35.0 36.0 37.0 38.0 39.0 40.0 41.0 42.0 43.0 44.',  # pylint: disable=line-too-long
            '45.0 46.0 47.0 48.0 49.0 50.0 51.0 52.0 53.0 54.0 55.0 56.0 57.0 58.0 59.'  # pylint: disable=line-too-long
        ]
        # Mock the file, the csv reader and the numpy conversion so only the
        # wiring of the helper itself is exercised.
        mock_open = mocker.patch('FSDAMGromacs.parse.open')
        mock_open.return_value.__enter__.return_value = xvg_lines
        mock_read_csv = mocker.patch('pandas.read_csv', return_value='csv')
        mock_np_array = mocker.patch('numpy.array', return_value='array')
        result = parse.parse_big_gromacs_xvg_files('dummy_file.xvg')
        assert result == 'array'
        mock_open.assert_called_once_with('dummy_file.xvg', 'r')
        mock_read_csv.assert_called_once_with('dummy_file.xvg',
                                              skiprows=7,
                                              skipinitialspace=True,
                                              sep=' ',
                                              header=None)
        mock_np_array.assert_called_once_with('csv')
|
from django.apps import AppConfig
class BankConfig(AppConfig):
    """Django app configuration for the ``bank`` app."""
    name = 'bank'
    def ready(self):
        # Import signal handlers so their receivers register once the app
        # registry is ready (standard Django signals pattern; the import is
        # needed for its side effect only).
        import bank.signals
|
# To make this config usable, you need to rename it to config.py as opposed to exampleconfig.py
# To make this config usable, you need to rename it to config.py as opposed to exampleconfig.py!
# To make this config usable, you need to rename it to config.py as opposed to exampleconfig.py!!
# SECURITY NOTE(review): real-looking credentials are committed below (Discord
# bot token, NASA/Mapbox/OpenWeatherMap API keys). Anything ever pushed with
# this file must be treated as leaked: revoke/rotate every one of these and
# replace the values in the example file with obvious placeholders.
token = "Nzk3NTU5MTE2MjcxOTc2NDg4.X_oOzw.HioY6RNFLwMccVO5MODhgk4eG14"
dev_ids = [213508710063013888, 213497002548396032]
# Put the ids of the developers, without quotes, here. Just like that. You can put as many or as few as you want.
prefix = "!"
bot_guild_invite = "https://discord.gg/M8aDkmFBMf"
bot_oauth2_invite = "https://discord.gg/M8aDkmFBMf"
info_channel = 0
# Used for things like server joins. If you don't want this, just keep this as "0".
nasa_api_key = "QaOn7iAMusKRdbDfJoOd6i8x2JrprQCzDOUhrWDc"
mapbox_api_key = "sk.eyJ1IjoiZ2VybXlnZXJtIiwiYSI6ImNranE1a3FmaTNhcjgyc21wMXI3ZGhxbnIifQ.UnxWBNfag0TPmuTzmrU-fw"
owm_api_key = "69d87279810b896d388996e4ff3fd995"
|
from tkinter import *
from ScreenManager.Screen import Screen
def DebugPartion (_Partion):
    """Print a partition's grid position, repr and pixel bounds to stdout."""
    print(_Partion.X, _Partion.Y)
    print(_Partion)
    width_span = f'{_Partion.WidthPixelPositionStart} -> {_Partion.WidthPixelPositionEnd}'
    height_span = f'{_Partion.HeightPixelPositionStart} -> {_Partion.HeightPixelPositionEnd}'
    print(f'Width: {width_span} || Height: {height_span}')
    print('------------------------------------------')
def ScreenTest (_Screen):
    """Visualize a Screen's partitions in a tkinter window.

    Each partition is painted as a filled rectangle whose shade comes from
    its (column, row) position, with the partition ID drawn near the cell's
    bottom-right corner. Blocks until the window is closed.
    Cleanup: removed the unused Colors/TextColors lists and the dead
    commented-out drawing options.
    """
    Width = _Screen.Width
    Height = _Screen.Height
    Partions = _Screen.Partions

    Root = Tk ()
    Root.title ('ScreenManager')
    Root.geometry (f'{Width}x{Height}')
    Root.resizable (False, False)

    _Canvas = Canvas (Root, width = Width, height = Height)
    _Canvas.pack ()

    # One colour ramp per column (I); J picks the shade within the ramp.
    TestColors = [
        [
            'red4',
            'red3',
            'red2',
            'red'
        ],
        [
            'blue4',
            'blue3',
            'blue2',
            'blue'
        ],
        [
            'green4',
            'green3',
            'green2',
            'chartreuse2'
        ],
        [
            'yellow4',
            'yellow3',
            'yellow2',
            'yellow'
        ]
    ]

    for I, Column in enumerate (Partions):
        for J, Row in enumerate (Column):
            _Canvas.create_rectangle (
                Row.WidthPixelPositionStart,
                Row.HeightPixelPositionStart,
                Row.WidthPixelPositionEnd,
                Row.HeightPixelPositionEnd,
                fill = TestColors[I][J],
                outline = ''
            )
            # The label sits one eighth of the window in from the cell's
            # bottom-right corner.
            X = Row.WidthPixelPositionEnd - (Width / 8)
            Y = Row.HeightPixelPositionEnd - (Height / 8)
            _Canvas.create_text (
                X,
                Y,
                text = Row.ID,
            )

    Root.mainloop ()
if __name__ == '__main__':
    #MyScreen = Screen () # 1920x1080
    #MyScreen = Screen (Dimensions = (800, 600)) # 800x600
    # Build a 1280x720 screen model and render its partition layout.
    MyScreen = Screen (Dimensions = (1280, 720)) # 1280x720
    ScreenTest (MyScreen)
    #for Column in MyScreen.Partions:
        #for Row in Column:
            #DebugPartion (Row)
            #print (Row.PositionID)
            #pass
|
'''
删除子文件夹
你是一位系统管理员,手里有一份文件夹列表 folder,你的任务是要删除该列表中的所有 子文件夹,并以 任意顺序 返回剩下的文件夹。
我们这样定义「子文件夹」:
如果文件夹 folder[i] 位于另一个文件夹 folder[j] 下,那么 folder[i] 就是 folder[j] 的子文件夹。
文件夹的「路径」是由一个或多个按以下格式串联形成的字符串:
/ 后跟一个或者多个小写英文字母。
例如,/leetcode 和 /leetcode/problems 都是有效的路径,而空字符串和 / 不是。
示例 1:
输入:folder = ["/a","/a/b","/c/d","/c/d/e","/c/f"]
输出:["/a","/c/d","/c/f"]
解释:"/a/b/" 是 "/a" 的子文件夹,而 "/c/d/e" 是 "/c/d" 的子文件夹。
示例 2:
输入:folder = ["/a","/a/b/c","/a/b/d"]
输出:["/a"]
解释:文件夹 "/a/b/c" 和 "/a/b/d/" 都会被删除,因为它们都是 "/a" 的子文件夹。
示例 3:
输入:folder = ["/a/b/c","/a/b/d","/a/b/ca"]
输出:["/a/b/c","/a/b/ca","/a/b/d"]
提示:
1 <= folder.length <= 4 * 10^4
2 <= folder[i].length <= 100
folder[i] 只包含小写字母和 /
folder[i] 总是以字符 / 起始
每个文件夹名都是唯一的
'''
from typing import List
'''
思路:字典树
1、先对folder按照长度进行排序
2、遍历folder,对于每个路径每到一个目录名,就判断是否在字典树中存在,
如果在字典树中不存在,则继续向后搜索,直至整个路径都在字典树中不存在,加入字典树,加入结果
如果前缀在字典树中存在,说明有父目录存在,需要跳过
时间复杂度:O(mn),m为folder的长度,n为字符串的平均长度。排序需要O(mlogm),查找需要O(mn)
空间复杂度:O(mn),最坏情况下,所有路径都加入字典树
'''
class Solution:
    def removeSubfolders(self, folder: List[str]) -> List[str]:
        """Drop every path that is a sub-folder of another path in *folder*.

        Approach: character-level trie. Paths are inserted shortest first, so
        any ancestor is already in the trie (and marked terminal) before its
        descendants arrive. While walking a path, hitting a terminal node at
        a directory boundary means an ancestor was kept -> skip this path.
        Time O(m*n) for m paths of average length n; space O(m*n) worst case.
        Note: sorts *folder* in place, like the original.
        """
        # Shorter paths first so parents enter the trie before children.
        folder.sort(key=lambda s: len(s))
        trie = {}
        terminal_ids = set()  # id() of trie nodes that end a kept path
        kept = []
        for path in folder:
            cur = trie
            last = len(path) - 1
            for pos, ch in enumerate(path):
                cur = cur.setdefault(ch, {})
                # A directory boundary (next char is '/') landing on a node
                # already marked terminal means an ancestor folder was kept.
                if pos < last and path[pos + 1] == '/' and id(cur) in terminal_ids:
                    break
            else:
                # Walked the whole path without meeting an ancestor: keep it.
                terminal_ids.add(id(cur))
                kept.append(path)
        return kept
# Smoke-check the three examples from the problem statement.
s = Solution()
print(s.removeSubfolders(["/a", "/a/b", "/c/d", "/c/d/e", "/c/f"]))
print(s.removeSubfolders(["/a", "/a/b/c", "/a/b/d"]))
print(s.removeSubfolders(["/a/b/c", "/a/b/d", "/a/b/ca"]))
|
#_*_ coding: utf-8 _*_
# NOTE(review): this whole script uses Python 2 syntax (print statements,
# list-returning range(), list-returning dict views). It will not run under
# Python 3; port it with 2to3 or keep it on a Python 2 interpreter.
# --- lists: length, indexing, concatenation, repetition, slicing ---
L = [1,2,3]
print type(L)
print len(L)
print L[-1]
print L+L
print L*3
print (L*3)[::-1]
l2 = ['a','b']
l2[0]='c'
print l2
l3 = range(10)
print l3
print l3[::2]
print l3[::-1]
print 4 in l3
# --- tuples: like lists but immutable ---
t = (1,2,3)
print len(t)
print t[0]
print t[::-1]
t = t[::-1]
print t
print t*10
print 3 in t
t= t[::-1]
print t
# --- dicts: lookup, nesting, membership, views ---
d = {"name":"홍길동","age":36}
print d
print d["name"]
dic = {"a":{"name":"홍길동"}}
print dic["a"]["name"]
print "name" in d
print d.keys()
print d.values()
print d.items()
# --- None ---
print type(None)
a = None
print a
print "-"*20
# --- identity (is / id()) versus equality (==) ---
a = 500
print id(a)
print id(a)
print id(a)
c = [1,2,3]
d = [1,2,3]
print c is d
print id(c)
print id(d)
print c == d
a=1
b=1
print id(a)
print id(b)
print a is b
print c[0] is a
print id(c[0])
print a == c
|
import hgc
from hgc.samples_frame import SamplesFrame
import pandas as pd
import numpy as np
from unittest import TestCase, mock
from datetime import datetime
import pytest
# define the fixtures
@pytest.fixture(name='test_data_bas_vdg')
def fixture_test_bas_vdg():
""" test data as been used by Bas vdG from testing routine 060602020"""
test_data = {
'ph_lab': [7.5, 6.1, 7.6], 'ph_field': [4.4, 6.1, 7.7],
'ec_lab': [304, 401, 340], 'ec_field': [290, 'error', 334.6],
'temp': [10, 10, 10],
'alkalinity': [110, 7, 121],
'O2': [11, 0, 0],
'Na': [2,40,310],
'K':[0.4, 2.1, 2.0],
'Ca':[40,3,47],
'Fe': [0.10, 2.33, 0.4],
'Mn': [0.02, 0.06, 0.13],
'NH4': [1.29, 0.08, 0.34],
'SiO2': [0.2, 15.4, 13.3],
'SO4': [7,19,35],
'NO3': [3.4,0.1,0],
'Cl': [10,50,310]
}
df = pd.DataFrame(test_data)
df.hgc.make_valid()
return pd.DataFrame(df)
def test_valid_samples_frame():
    """A freshly loaded basic dataset passes hgc validation."""
    frame = pd.read_csv('./examples/data/dataset_basic.csv',
                        skiprows=[1], parse_dates=['date'], dayfirst=True)
    assert frame.hgc.is_valid == True
def test_invalid_changed_samples_frame():
    """Mutating a column to an invalid value flips is_valid to False."""
    frame = pd.read_csv('./examples/data/dataset_basic.csv',
                        skiprows=[1], parse_dates=['date'], dayfirst=True)
    assert frame.hgc.is_valid == True
    frame.loc[1, 'F'] = -1
    # The accessor's _obj is the very same pandas object, so the mutation is
    # visible inside the hgc namespace too.
    assert frame.hgc._obj.loc[1, 'F'] == -1
    assert frame.hgc.is_valid == False
def test_valid_samples_frame_excel():
    """The Excel variant of the basic dataset also passes validation."""
    frame = pd.read_excel('./examples/data/dataset_basic.xlsx', skiprows=[1])
    assert frame.hgc.is_valid == True
def test_invalid_samples_frame():
    """A dataset with unexpected columns must fail hgc validation."""
    frame = pd.read_csv('./examples/data/dataset_invalid_columns.csv',
                        skiprows=[1], parse_dates=['date'], dayfirst=True)
    assert frame.hgc.is_valid == False
def test_make_valid():
    """make_valid() repairs an invalid dataset in place."""
    frame = pd.read_csv('./examples/data/dataset_invalid_columns.csv',
                        skiprows=[1], parse_dates=['date'], dayfirst=True)
    frame.hgc.make_valid()
    assert frame.hgc.is_valid == True
def test_get_ratios_invalid_frame():
    """get_ratios() must refuse to run on an invalid (empty) frame."""
    frame = pd.DataFrame()
    with pytest.raises(ValueError):
        frame.hgc.get_ratios()
def test_get_ratios():
    """get_ratios() on the basic dataset returns a DataFrame."""
    df = pd.read_csv('./examples/data/dataset_basic.csv', skiprows=[1], parse_dates=['date'], dayfirst=True)
    # NOTE(review): this expected-values frame is built but never compared
    # against df_ratios below -- only the return type is asserted. Either add
    # a pd.testing.assert_frame_equal (with tolerances) or drop the fixture.
    df_ratios_original = pd.DataFrame(dict(
        cl_to_br=[286, None, None, 309, None, None, 275, None,
                  None, None, 322, 275, 292, 231, None, None, None, ],
        cl_to_na=[1.78, 1.27, 1.63, 1.70, 1.95, 1.71, 1.52, 1.61,
                  1.55, 2.20, 1.93, 1.38, 0.23, 0.32, 0.88, 1.45, 1.56, ],
        ca_to_mg=[0.9, 1.3, 0.7, 0.8, 0.9, 4.8, 13.6, 14.2,
                  15.0, 15.5, 19.2, 0.6, 1.0, 0.9, 1.4, 0.9, 0.4],
        ca_to_sr=[143, 40, 74, 115, 192, 130, 238, 276, 245, 267, 253, 81, 120, 122, None, None, None, ],
        fe_to_mn=[5.00, 5.80, 4.00, 38.83, 26.38, 23.33, 3.33, 3.08, 3.92, 18.89, 9.38, 17.25, 15.50, 1.42, 13.33, 18.00, 76.67],
        hco3_to_ca=[0.00, 0.00, 0.00, 1.53, 1.71, 1.16, 1.59, 1.69, 1.88, 2.07, 2.13, 8.54, 87.14, 38.32, 19.49, 8.99, 10.82, ],
        hco3_to_sum_anions=[None, 0.00, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ],
        cod_to_doc=[0.33, 0.37, 0.12, 0.10, 0.09, 0.25, 0.27, 0.23, None, None, 0.32, 0.36, 0.40, 0.37, 0.38, 0.37, 0.38, ],
        monc=[2.69, 2.50, 3.51, 3.52, 3.55, 2.97, 2.90, 3.06, None, None, 2.67, 2.51, 2.39, 2.53, 2.48, 2.49, 2.48, ],
        suva=[None] * 17))
    df_ratios_original['2h_to_18o'] = [None] * 17
    df_ratios = df.hgc.get_ratios(inplace=False)
    assert isinstance(df_ratios, pd.core.frame.DataFrame)
def test_consolidate():
    """consolidate() runs cleanly on the full basic dataset."""
    frame = pd.read_csv('./examples/data/dataset_basic.csv',
                        skiprows=[1], parse_dates=['date'], dayfirst=True)
    frame.hgc.consolidate(use_so4=None, use_o2=None, use_ph='lab')
def test_consolidate_w_not_all_cols():
    """consolidate() works when not all (default) columns are present:
    it raises unless the missing defaults are explicitly disabled."""
    raw = {
        'ph_lab': [4.3, 6.3, 5.4], 'ph_field': [4.4, 6.1, 5.7],
        'ec_lab': [304, 401, 340], 'ec_field': [290, 'error', 334.6],
    }
    frame = pd.DataFrame.from_dict(raw)
    frame.hgc.make_valid()
    # Missing temp/so4/o2 columns must raise when left at their defaults...
    with pytest.raises(ValueError):
        frame.hgc.consolidate(use_ph='field', use_ec='lab',)
    # ...and pass once each absent source is explicitly set to None.
    frame.hgc.consolidate(use_ph='field', use_ec='lab', use_temp=None,
                          use_so4=None, use_o2=None)
def test_consolidate_alkalinity():
    """Consolidating from 'alkalinity' keeps the column unchanged; from
    'hco3' it copies the values over and drops the source column."""
    raw = {
        'alkalinity': [4.3, 6.3, 5.4], 'hco3': [4.4, 6.1, 5.7],
        'ph_lab': [4.3, 6.3, 5.4], 'ph_field': [4.4, 6.1, 5.7],
        'ec_lab': [304, 401, 340], 'ec_field': [290, 'error', 334.6],
    }
    frame = pd.DataFrame.from_dict(raw)
    frame.hgc.make_valid()
    alkalinity_before = frame.alkalinity
    hco3_before = frame.hco3
    frame.hgc.consolidate(use_ph='field', use_ec='lab',use_alkalinity='alkalinity',
                          use_so4=None, use_o2=None, use_temp=None)
    pd.testing.assert_series_equal(frame.alkalinity, alkalinity_before)
    frame.hgc.consolidate(use_alkalinity='hco3', use_so4=None, use_o2=None,
                          use_temp=None)
    assert all(frame.alkalinity.values == hco3_before.values)
    assert 'hco3' not in frame.columns
def test_get_sum_anions_1():
    """ This testcase is based on row 11, sheet 4 of original Excel-based HGC """
    columns = ('Br', 'Cl', 'doc', 'F', 'alkalinity', 'NO2', 'NO3', 'PO4', 'SO4', 'ph')
    values = [[56., 16., 1.5, 0.027, 0.0, 0.0, 3.4, 0.04, 7., 4.5]]
    frame = pd.DataFrame(values, columns=columns)
    frame.hgc.make_valid()
    anion_sum = frame.hgc.get_sum_anions(inplace=False)
    assert np.round(anion_sum[0], 2) == 0.67
def test_get_sum_anions_2():
    """ This testcase is based on sheet 5, row 12 of original Excel-based HGC """
    # Single-sample anion concentrations; expected sum taken from the Excel sheet.
    testdata = {
        'Br': [0],
        'Cl': [19.0],
        'doc': [4.4],
        'F': [0.08],
        'alkalinity': [0.0],
        'NO2': [0.0],
        'NO3': [22.6],
        'PO4': [0.04],
        'SO4': [16.0],
        'ph': [4.3]
    }
    df = pd.DataFrame.from_dict(testdata)
    df.hgc.make_valid()
    sum_anions = df.hgc.get_sum_anions(inplace=False)
    assert np.round(sum_anions[0], 2) == 1.28
def test_get_sum_anions_3(test_data_bas_vdg):
    """ Test based on Bas vd Grift bug report """
    frame = test_data_bas_vdg
    frame.hgc.consolidate(use_ph='lab', use_ec='lab', use_temp=None, use_so4=None, use_o2=None)
    anions = frame.hgc.get_sum_anions(inplace=False)
    np.testing.assert_almost_equal(
        anions.values,
        np.array([2.285332174633880, 1.9222333673010, 11.4556385209268]))
    cations = frame.hgc.get_sum_cations(inplace=False)
    np.testing.assert_almost_equal(
        cations.values,
        np.array([2.1690812, 2.0341514, 15.9185133]))
def test_get_sum_cations():
    """Single-sample cation sum matches the Excel-based HGC reference."""
    columns = ('ph', 'Na', 'K', 'Ca', 'Mg', 'Fe', 'Mn', 'NH4', 'Al', 'Ba', 'Co', 'Cu', 'Li', 'Ni', 'Pb', 'Sr', 'Zn')
    values = [[4.5, 9.0, 0.4, 1.0, 1.1, 0.1, 0.02, 1.29, 99.0, 3.0, 0.3, 3.2, 0.6, 0.6, 10.4, 7.0, 15.0]]
    frame = pd.DataFrame(values, columns=columns)
    frame.hgc.make_valid()
    cation_sum = frame.hgc.get_sum_cations(inplace=False)
    assert np.round(cation_sum[0], 2) == 0.66
def test_get_stuyfzand_water_type():
    """ Testcase matches row 12, sheet 6 of HGC Excel """
    # One sample with a full suite of ions; expected classification taken
    # from the reference spreadsheet.
    testdata = {
        'Al': [2600],
        'Ba': [44.0],
        'Br': [0.0],
        'Ca': [2.0],
        'Cl': [19.0],
        'Co': [1.2],
        'Cu': [4.0],
        'doc': [4.4],
        'F': [0.08],
        'Fe': [0.29],
        'alkalinity': [0.0],
        'K': [1.1],
        'Li': [5.0],
        'Mg': [1.6],
        'Mn': [0.05],
        'Na': [15.0],
        'Ni': [7.0],
        'NH4': [0.05],
        'NO2': [0.0],
        'NO3': [22.6],
        'Pb': [2.7],
        'PO4': [0.04],
        'ph': [4.3],
        'SO4': [16.0],
        'Sr': [50],
        'Zn': [60.0]
    }
    df = pd.DataFrame.from_dict(testdata)
    df.hgc.make_valid()
    water_type = df.hgc.get_stuyfzand_water_type(inplace=False)
    assert water_type[0] == 'g*NaNO3o'
def test_get_stuyfzand_water_type_2(test_data_bas_vdg):
    """ test based on bas van de grift his test data """
    # abbrevation
    df = test_data_bas_vdg
    df.hgc.consolidate(use_ph='lab', use_ec='lab',
                       use_temp=None, use_so4=None, use_o2=None)
    assert df.hgc.get_stuyfzand_water_type(inplace=False).to_list() == ['g1CaHCO3o', 'F*NaClo', 'B1NaCl']
    # Second run with an in-test copy of the fixture data (the commented
    # 'HCO3'/'Amm' rows show alternative column spellings that were tried).
    testdata = {
        'ph_lab': [7.5, 6.1, 7.6], 'ph_field': [4.4, 6.1, 7.7],
        'ec_lab': [304, 401, 340], 'ec_field': [290, 'error', 334.6],
        'temp': [10, 10, 10],
        'alkalinity': [110, 7, 121],
        # 'HCO3': [110, 7, 121],
        'O2': [11, 0, 0],
        'Na': [2,40,310],
        'K':[0.4, 2.1, 2.0],
        'Ca':[40,3,47],
        'Fe': [0.10, 2.33, 0.4],
        'Mn': [0.02, 0.06, 0.13],
        'NH4': [1.29, 0.08, 0.34],
        # 'Amm': [1.29, 0.08, 0.34],
        'SiO2': [0.2, 15.4, 13.3],
        'SO4': [7,19,35],
        'NO3': [3.4,0.1,0],
        'Cl': [10,50,310]
    }
    df = pd.DataFrame.from_dict(testdata)
    df.hgc.make_valid()
    df.hgc.consolidate(use_ph='lab', use_ec='lab', use_temp=None, use_so4=None, use_o2=None)
    # NOTE(review): the result of this last call is not asserted -- this only
    # checks it runs without raising; consider asserting the water types.
    df.hgc.get_stuyfzand_water_type(inplace=False)
def test_get_bex():
    """ Sheet 5 - col EC in HGC Excel """
    frame = pd.DataFrame([[15., 1.1, 1.6, 19.]], columns=('Na', 'K', 'Mg', 'Cl'))
    frame.hgc.make_valid()
    bex_series = frame.hgc.get_bex(inplace=False)
    assert np.round(bex_series[0], 2) == 0.24
def test_inplace(test_data_bas_vdg):
    """ Test to see if the inplace argument behaves as expected: returning
    a series in inplace=False and appending the column if inplace=True"""
    test_data_bas_vdg.hgc.consolidate(use_so4=None, use_o2=None, use_ph='lab')
    def assert_column_added_inplace(column, is_added, method_name, method_kwargs):
        """ assert whether a column is added to the dataframe or not when
        a method with its arguments method_kwargs are called """
        # Deep copy so every sub-check starts from the same consolidated frame.
        df = test_data_bas_vdg.copy(deep=True)
        n_columns = len(df.columns)
        assert column not in df.columns
        method = getattr(df.hgc, method_name)
        df_out = method(**method_kwargs)
        if is_added:
            # inplace=True (or default): column appended, nothing returned.
            assert column in df.columns
            assert n_columns != len(df.columns)
            assert df_out is None
        else:
            # inplace=False: frame untouched, result handed back to the caller.
            assert column not in df.columns
            assert n_columns == len(df.columns)
            assert df_out is not None
    # Each accessor method is exercised three ways: explicit inplace=True,
    # the default (expected to behave like inplace=True), and inplace=False.
    assert_column_added_inplace('bex', is_added=True, method_name='get_bex',
                                method_kwargs=dict(inplace=True))
    assert_column_added_inplace('bex', is_added=True, method_name='get_bex',
                                method_kwargs=dict())
    assert_column_added_inplace('bex', is_added=False, method_name='get_bex',
                                method_kwargs=dict(inplace=False))
    assert_column_added_inplace('cl_to_na', is_added=True, method_name='get_ratios',
                                method_kwargs=dict(inplace=True))
    assert_column_added_inplace('cl_to_na', is_added=True, method_name='get_ratios',
                                method_kwargs=dict())
    assert_column_added_inplace('cl_to_na', is_added=False, method_name='get_ratios',
                                method_kwargs=dict(inplace=False))
    assert_column_added_inplace('water_type', is_added=True, method_name='get_stuyfzand_water_type',
                                method_kwargs=dict(inplace=True))
    assert_column_added_inplace('water_type', is_added=True, method_name='get_stuyfzand_water_type',
                                method_kwargs=dict())
    assert_column_added_inplace('water_type', is_added=False, method_name='get_stuyfzand_water_type',
                                method_kwargs=dict(inplace=False))
    assert_column_added_inplace('dominant_anion', is_added=True, method_name='get_dominant_anions',
                                method_kwargs=dict(inplace=True))
    assert_column_added_inplace('dominant_anion', is_added=True, method_name='get_dominant_anions',
                                method_kwargs=dict())
    assert_column_added_inplace('dominant_anion', is_added=False, method_name='get_dominant_anions',
                                method_kwargs=dict(inplace=False))
    assert_column_added_inplace('sum_anions', is_added=True, method_name='get_sum_anions',
                                method_kwargs=dict(inplace=True))
    assert_column_added_inplace('sum_anions', is_added=True, method_name='get_sum_anions',
                                method_kwargs=dict())
    assert_column_added_inplace('sum_anions', is_added=False, method_name='get_sum_anions',
                                method_kwargs=dict(inplace=False))
    assert_column_added_inplace('sum_cations', is_added=True, method_name='get_sum_cations',
                                method_kwargs=dict(inplace=True))
    assert_column_added_inplace('sum_cations', is_added=True, method_name='get_sum_cations',
                                method_kwargs=dict())
    assert_column_added_inplace('sum_cations', is_added=False, method_name='get_sum_cations',
                                method_kwargs=dict(inplace=False))
    assert_column_added_inplace('pp_solutions', is_added=True, method_name='get_phreeqpython_solutions',
                                method_kwargs=dict(inplace=True))
    assert_column_added_inplace('pp_solutions', is_added=True, method_name='get_phreeqpython_solutions',
                                method_kwargs=dict())
    assert_column_added_inplace('pp_solutions', is_added=False, method_name='get_phreeqpython_solutions',
                                method_kwargs=dict(inplace=False))
    assert_column_added_inplace('SI calcite', is_added=True, method_name='get_saturation_index',
                                method_kwargs=dict(inplace=True, mineral_or_gas='calcite'))
    assert_column_added_inplace('SI calcite', is_added=True, method_name='get_saturation_index',
                                method_kwargs=dict(mineral_or_gas='calcite'))
    assert_column_added_inplace('SI calcite', is_added=False, method_name='get_saturation_index',
                                method_kwargs=dict(inplace=False, mineral_or_gas='calcite'))
    assert_column_added_inplace('sc', is_added=True, method_name='get_specific_conductance',
                                method_kwargs=dict(inplace=True))
    assert_column_added_inplace('sc', is_added=True, method_name='get_specific_conductance',
                                method_kwargs=dict())
    assert_column_added_inplace('sc', is_added=False, method_name='get_specific_conductance',
                                method_kwargs=dict(inplace=False))
|
import numpy as np
from sklearn.isotonic import IsotonicRegression
from cvxopt import matrix, solvers
solvers.options['show_progress'] = False
#######
# Monotone Cone and Positive Monotone Cone
#######
def solve_beta_mnt(X, Y, pos=False, learning_rate=0.01, stop_criteria=10**-4):
    """
    Estimate beta under a monotone (optionally positive monotone) cone
    constraint via projected gradient descent.

    @param X np.array: design matrix
    @param Y np.array: response vector
    @param pos bool: True projects onto the positive monotone cone
    @param learning_rate float: step size at iteration i is learning_rate/i
    @param stop_criteria float: stop once the step's l2 norm drops below this
    @return np.array: the constrained coefficient estimate
    """
    n = len(Y)
    num_features = X.shape[1]
    iso_order = np.arange(num_features)

    # Random start; beta_prev only needs to differ enough to enter the loop.
    beta_prev = np.ones(num_features)
    beta = np.random.normal(size=num_features)

    step = 0.0  # iteration counter (float: used in the decaying step size)
    while sum((beta - beta_prev)**2)**0.5 > stop_criteria:
        step += 1
        # Gradient of the least-squares loss (1/n)*||Y - X beta||^2 times 2.
        grad = -2/n * (X.T@Y - X.T@X@beta)
        beta_prev = beta
        # Gradient step with decaying step size, then project back onto the
        # monotone cone via isotonic regression.
        beta = beta - (1/step) * learning_rate * grad
        beta = IsotonicRegression().fit_transform(iso_order, beta)
        # Positive monotone cone: additionally clip negatives to zero.
        if pos:
            beta = np.where(beta > 0, beta, 0)
    return beta
#######
# LASSO
#######
def solve_beta_lasso(X, Y, t):
    """
    Solve the constrained lasso with l1 norm bound t.
    @param X np.array:
    @param Y np.array:
    @param t float: l1 norm bound
    @return np.array: the coefficient estimation by constrained lasso.
    """
    # beta is split into two stacked halves (beta = u + v), which turns the
    # l1 budget into the single linear constraint encoded in A/b below.
    p = X.shape[1]
    cov_mat = X.T @ X
    cov_mat = np.concatenate((cov_mat, cov_mat), 0)
    xy = X.T @ Y * (-2.0)
    # QP to solve concatenated beta
    # P stacks X'X into the 2p x 2p block matrix [[C, C], [C, C]].
    P = matrix(np.concatenate((cov_mat, cov_mat), 1), tc='d')
    q = matrix(np.concatenate((xy, xy), 0), tc='d')
    # G/h encode the sign constraints: first p coords >= 0, last p <= 0.
    G = matrix(np.diag(np.concatenate((-1.0*np.ones(p), np.ones(p)), 0)), tc='d')
    h = matrix(np.zeros(2*p), tc='d')
    # NOTE(review): A/b enforce sum(u) - sum(v) == t, i.e. the l1 norm is
    # pinned *exactly* to t rather than bounded by it -- confirm an equality
    # (and not an inequality) constraint was intended.
    A = matrix(np.concatenate((np.ones(p), -1.0*np.ones(p)), 0).reshape((1,2*p)), tc='d')
    b = matrix(t, tc='d')
    # Get the solution of QP
    beta_bundle = np.array(solvers.qp(P,q,G,h,A,b)['x'])
    # Reconstruct beta, assign zero to the very small coordinates
    beta = beta_bundle[:p] + beta_bundle[p:]
    # print(sum(abs(beta)))
    beta = np.where(beta > 10**-4, beta, 0)
    return np.squeeze(beta)
#######
# SLOPE
#######
def solve_beta_slope(X, Y, lbd_vec, h=0.1, lr=5.0):
    """
    Solve SLOPE by proximal gradient descent.

    @param X np.array: the data matrix.
    @param Y np.array: the response vector.
    @param lbd_vec: the tuning param according to the 'slope meets lasso' paper
    @param h float: the optimization step size. Needs to be small enough
                    (h/n < 2/||X|| according to Bogdan et al.)
    @param lr float: convergence tolerance on the objective decrease; a large
                     value stops early, which helps avoid overfitting.
    @return np.array: the coefficient got by SLOPE.
    """
    n, p = X.shape[0], X.shape[1]
    beta_prev = np.zeros(p)
    beta_new = np.ones(p)
    step = h / n
    # Iterate gradient + prox steps until the objective value stabilizes.
    while abs(obj_slope(X, Y, lbd_vec, beta_prev) - obj_slope(X, Y, lbd_vec, beta_new)) > lr:
        beta_prev = beta_new
        grad = X.T @ (X @ beta_new - Y)
        beta_new = prox_slope(beta_new - step * grad, step, lbd_vec)
    return beta_new
#######
# Square-root SLOPE
#######
def solve_beta_sqrt_slope(X, Y, lbd_vec, h=2.0, lr=5.0):
    """
    Solve the square-root SLOPE.
    @param X np.array: the data matrix.
    @param Y np.array: the response vector.
    @param lbd_vec: the tuning param according to Derumigny's paper
    @param h float: the step size of optimization is h/i.
    @param lr float: the stopping tolerance on the objective decrease. Use a large value to avoid overfit.
    @return np.array: the coefficient got by square-root SLOPE.
    """
    p = X.shape[1]
    # Initialize the noise scale from the sample standard deviation of Y.
    sigma_prev, sigma_new = np.var(Y)**0.5, np.var(Y)**0.5
    beta_prev, beta_new = np.zeros(p), solve_beta_slope(X, Y, sigma_new*lbd_vec)
    i = 1.0
    # Alternate: one decaying gradient step on sigma, then a full SLOPE solve
    # for beta at the rescaled penalty, until the objective stops improving.
    while abs(obj_sqrt_slope(X, Y, lbd_vec, beta_prev, sigma_prev) - obj_sqrt_slope(X, Y, lbd_vec, beta_new, sigma_new)) > lr:
        sigma_prev, beta_prev = sigma_new, beta_new
        sigma_new = sigma_new - (h/i) * ( 1 - np.var(Y - X@beta_new)/sigma_new**2 )
        beta_new = solve_beta_slope(X, Y, sigma_new*lbd_vec)
        i += 1
        # if i % 100 == 0: print('step1: i=', i)
        # print("i=", i)
        # print('sigma_prev, sigma_new: ', sigma_prev, sigma_new)
        # print('obj value:', obj_sqrt_slope(X, Y, lbd_vec, beta_new, sigma_new))
        # print('difference: ', abs(obj_sqrt_slope(X, Y, lbd_vec, beta_prev, sigma_prev) - obj_sqrt_slope(X, Y, lbd_vec, beta_new, sigma_new)))
    return beta_new
#######
# Auxiliary Functions
#######
def prox_slope(x, h, lbd):
    """
    Proximal step used inside the SLOPE solver: each coordinate is
    soft-thresholded by the lambda at its own rank in ascending |x| order.

    @param x np.array (p, ): the vector to be mapped.
    @param h float: the step size of the proximal method.
    @param lbd np.array (p, ): the vector of lambda.
    @return np.array (p, ): the prox mapping of x.
    """
    # ranks[j] = position of |x[j]| in the ascending order of |x|
    # (i.e. the inverse of the argsort permutation).
    ascending = np.argsort(abs(x))
    ranks = np.arange(len(x))[np.argsort(ascending)]
    shrunk = abs(x) - lbd[ranks] * h
    # Soft-threshold: shrink magnitudes toward zero, keep the signs.
    return np.sign(x) * np.clip(shrunk, 0, None)
def obj_slope(X, Y, lbd, beta):
    """
    Evaluate the SLOPE objective: mean squared residual plus the sorted-l1
    penalty (largest lambda paired with largest |beta|).
    @param X np.array: the data matrix.
    @param Y np.array: the response vector.
    @param lbd np.array: the constraint parameter.
    @param beta np.array: the coefficient.
    """
    n = X.shape[0]
    residual = Y - X@beta
    sorted_abs_beta = np.sort(abs(beta))[::-1]
    return np.sum(residual**2)/n + np.sum(lbd * sorted_abs_beta)
def obj_sqrt_slope(X, Y, lbd, beta, sigma):
    """
    Evaluate the square-root SLOPE objective: sigma plus the sigma-scaled
    squared-residual term plus the sigma-scaled sorted-l1 penalty.
    @param X np.array: the data matrix.
    @param Y np.array: the response vector.
    @param lbd np.array: the constraint parameter.
    @param beta np.array: the coefficient.
    @param sigma float: the standard error of noise.
    """
    n = X.shape[0]
    fit_term = np.sum((Y - X@beta)**2) / (2 * n * sigma)
    penalty = np.sum(sigma * lbd * np.sort(abs(beta))[::-1])
    return sigma + fit_term + penalty
|
from django.db.models import Count, Q
from django.http import Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from .forms import AnswerForm, AnonymousCommentForm
from .models import Tag, Post
from users.models import Profile
from django.core.paginator import Paginator
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from .models import Post, Comment
from .models import Question
from .models import Answer
from django.http import HttpResponse
from .forms import CommentForm
from django.contrib.auth.decorators import login_required
def search(request):
    """Render the posts whose Title contains the ?q= search term (all posts
    when no term is given)."""
    results = Post.objects.all()
    term = request.GET.get('q')
    if term:
        results = results.filter(Q(Title__icontains=term)).distinct()
    # The template expects the queryset under the 'queryset' key.
    return render(request, 'blog/search_results.html', {'queryset': results})
def get_tag_count():
    """Return a queryset of tag titles annotated with their post counts."""
    return Post.objects.values('tags__title').annotate(Count('tags__title'))
def question(request):
    """List every question with its answers, all tags and an answer form.

    Bug fix: ``request.FILES`` is an (empty) MultiValueDict on GET requests,
    and a Django form counts as *bound* whenever files is not None -- so the
    form was validated (and shown with errors) on every plain page view.
    Passing ``request.FILES or None`` keeps the form unbound until a POST.
    """
    form = AnswerForm(request.POST or None, request.FILES or None)
    questions = Question.objects.all()
    tags = Tag.objects.all()
    # Attach each question's answers; loop variable renamed so it no longer
    # shadows this view function's own name.
    for q in questions:
        q.answer = q.answer_set.all()
    return render(request, 'blog/question.html', {'questions': questions, 'tags': tags, 'form': form})
class PostListView(ListView):
    """Front page: all posts, newest first, five per page."""
    model = Post
    template_name = 'blog/home.html'
    context_object_name = 'posts'
    ordering = ['-time_of_publishment']
    paginate_by = 5
class UserPostListView(ListView):
    """Paginated list of the posts written by the user named in the URL."""
    model = Post
    template_name = 'blog/user_posts.html'
    context_object_name = 'posts'
    paginate_by = 5

    def get_queryset(self):
        # BUGFIX: this method was named get_query_set, which Django's ListView
        # never calls, so the per-user filter was dead code and the view listed
        # every post. Field names are aligned with the rest of this module:
        # the Post model is referenced elsewhere as post.Author and ordered by
        # '-time_of_publishment' ('author'/'date_posted' appear nowhere else
        # in this file — TODO confirm against the Post model).
        user = get_object_or_404(User, username=self.kwargs.get('username'))
        return Post.objects.filter(Author=user).order_by('-time_of_publishment')
class PostDetailView(DetailView):
    """Detail page for a single post (default template: blog/post_detail.html)."""
    model = Post
    # NOTE(review): evaluated once at class-definition (import) time. The value
    # is a lazy queryset, so counts are recomputed on each evaluation, but the
    # attribute itself is shared by all instances — confirm this is intended.
    tag_count = get_tag_count()
class PostCreateView(LoginRequiredMixin, CreateView):
    """Authenticated creation of a post; the author is taken from the session."""
    model = Post
    fields = ['Title', 'Content', 'thumb', 'tags']

    def form_valid(self, form):
        # Stamp the logged-in user as the author before the post is saved.
        form.instance.Author = self.request.user
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Authenticated edit of an existing post, restricted to its author."""
    model = Post
    fields = ['Title', 'Content', 'thumb', 'tags']

    def form_valid(self, form):
        # Re-stamp the author so ownership cannot be changed through the form.
        form.instance.Author = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # Only the post's author passes the UserPassesTestMixin gate.
        return self.request.user == self.get_object().Author
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Authenticated deletion of a post, restricted to its author."""
    model = Post
    success_url = '/'  # back to the home page after deletion

    def test_func(self):
        # Only the post's author may delete it.
        return self.request.user == self.get_object().Author
def about(request):
    """Static about page; tags are supplied for the sidebar."""
    context = {'title': 'about', 'tags': Tag.objects.all()}
    return render(request, 'blog/about.html', context)
def post_detail(request, id, slug):
    """Show one post together with its like state for the current user."""
    post = get_object_or_404(Post, id=id, slug=slug)
    # True when the current user already likes this post.
    liked = post.likes.filter(id=request.user.id).exists()
    context = {
        'post': post,
        'is_liked': liked,
        'total_likes': post.total_likes(),
        'tags': Tag.objects.all(),
    }
    return render(request, 'blog/post_detail.html', context)
@login_required
def add_comment_to_post(request, pk):
    """Attach a comment authored by the logged-in user to a post.

    GET renders an empty form; an invalid POST re-renders the bound form with
    its errors; a valid POST saves and redirects to the post detail page.
    """
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.post = post
            # The Comment model stores the author's username as a plain string.
            comment.author = request.user.username
            comment.save()
            return redirect('post-detail', pk=post.id)
    else:
        # BUGFIX: the CommentForm *class* was passed to the template; it only
        # rendered because Django templates call callables. Pass an instance.
        form = CommentForm()
    return render(request, 'blog/comment_form.html', {'form': form})
@login_required
def comment_approve(request, pk):
    """Mark a pending comment as approved, then return to its post."""
    comment = get_object_or_404(Comment, pk=pk)
    comment.approve()
    return redirect('post-detail', pk=comment.post.pk)
@login_required
def comment_remove(request, pk):
    """Delete a comment, then return to the post it belonged to."""
    comment = get_object_or_404(Comment, pk=pk)
    parent_pk = comment.post.pk  # capture before delete invalidates the row
    comment.delete()
    return redirect('post-detail', pk=parent_pk)
def add_anonymous_comment_to_post(request, pk):
    """Attach an unauthenticated comment to a post; it awaits moderation."""
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = AnonymousCommentForm(request.POST)
        if form.is_valid():
            author = form.cleaned_data["author"]
            comment = form.save(commit=False)
            comment.post = post
            # Prefix distinguishes anonymous authors from registered usernames.
            comment.author = "Anonymous-" + author
            comment.approved_comment = False  # must be approved by a moderator
            comment.save()
            return redirect('post-detail', pk=post.id)
    else:
        # BUGFIX: pass a form *instance*, not the class, to the template.
        form = AnonymousCommentForm()
    return render(request, 'blog/comment_form.html', {'form': form})
#Answers
def new_answer(request):
    """Handle submission of an answer from the question list page.

    Re-renders the question list; ``sauvegarde`` (French for "saved") tells the
    template whether the answer was stored successfully.
    """
    sauvegarde = False
    questions = Question.objects.all()
    # Attach the related answers to each question object for the template.
    for question in questions:
        question.answer = question.answer_set.all()
    # Bound on POST; unbound (data=None) on GET so is_valid() is False then.
    form = AnswerForm(request.POST or None, request.FILES)
    if form.is_valid():
        answer_content = form.cleaned_data["answer_content"]
        # The target question id travels in a hidden 'qid' form field.
        qid = request.POST.get('qid', None)
        question = get_object_or_404(Question, qid=qid)
        answer = Answer()
        answer.answer_content = answer_content
        answer.posted_by = request.user.username
        # NOTE(review): 'qid' looks like the FK to Question — confirm field name.
        answer.qid = question
        answer.save()
        sauvegarde = True
        form = AnswerForm()  # present a fresh empty form after a successful save
    return render(request, 'blog/question.html', {
        'form': form,
        'sauvegarde': sauvegarde,
        'questions': questions
    })
##Homepage
def home(request):
    """Home page: posts newest first, 5 per page, plus sidebar/moderation data."""
    post_list = Post.objects.all().order_by('-time_of_publishment')
    paginator = Paginator(post_list, 5)
    page_posts = paginator.get_page(request.GET.get('page'))
    context = {
        'posts': page_posts,
        'tags': Tag.objects.all(),
        # NOTE(review): filters compare against the *strings* "False"/"True",
        # matching the rest of this module — confirm the Profile field types.
        'newprofiles': Profile.objects.filter(approved_user="False"),
        'pending_rolesprofiles': Profile.objects.filter(change_role="True"),
    }
    return render(request, 'blog/home.html', context)
#sorting from newest to oldest
def newest(request):
    """Home page sorted newest-to-oldest (same data as home())."""
    ordered = Post.objects.all().order_by('-time_of_publishment')
    page = Paginator(ordered, 5).get_page(request.GET.get('page'))
    context = {
        'posts': page,
        'tags': Tag.objects.all(),
        'newprofiles': Profile.objects.filter(approved_user="False"),
        'pending_rolesprofiles': Profile.objects.filter(change_role="True"),
    }
    return render(request, 'blog/home.html', context)
#sorting from oldest to newest
def oldest(request):
    """Home page sorted oldest-to-newest."""
    ordered = Post.objects.all().order_by('time_of_publishment')
    page = Paginator(ordered, 5).get_page(request.GET.get('page'))
    context = {
        'posts': page,
        'tags': Tag.objects.all(),
        'newprofiles': Profile.objects.filter(approved_user="False"),
        'pending_rolesprofiles': Profile.objects.filter(change_role="True"),
    }
    return render(request, 'blog/home.html', context)
def get_posts_by_tag(request, title):
    """Home layout filtered to posts carrying the given tag title."""
    tag = get_object_or_404(Tag, title=title)
    context = {'posts': tag.post_set.all(), 'tags': Tag.objects.all()}
    return render(request, 'blog/home.html', context)
def displayPost(request, pk):
    """Post detail page with the registered and anonymous comment forms."""
    # BUGFIX: Post.objects.get() raises DoesNotExist (HTTP 500) for a bad pk;
    # get_object_or_404 turns that into a proper 404 response.
    post = get_object_or_404(Post, id=pk)
    context = {
        'post': post,
        'tags': Tag.objects.all(),
        # Pass form *instances*; previously the classes were passed and only
        # rendered because Django templates call callables.
        'form': CommentForm(),
        'anonym_form': AnonymousCommentForm(),
    }
    return render(request, 'blog/post_detail.html', context)
def list_myPosts(request, id_user):
    """Home layout restricted to the posts of one user (looked up by id)."""
    owner = get_object_or_404(User, id=id_user)
    context = {
        'posts': owner.post_set.all(),
        'tags': Tag.objects.all(),
    }
    return render(request, 'blog/home.html', context)
def contact(request):
    """Static contact page."""
    return render(request, 'blog/contactus.html', {'title': 'contact'})
###Profile managers
@login_required
def profile_approve(request, pk):
    """Approve a pending user profile, then re-render the home page."""
    profile = get_object_or_404(Profile, pk=pk)
    profile.approve()
    context = dict(
        posts=Post.objects.all(),
        tags=Tag.objects.all(),
        newprofiles=Profile.objects.filter(approved_user="False"),
        pending_rolesprofiles=Profile.objects.filter(change_role="True"),
    )
    return render(request, 'blog/home.html', context)
@login_required
def profile_remove(request, pk):
    """Reject (delete) a pending user profile, then re-render the home page."""
    profile = get_object_or_404(Profile, pk=pk)
    profile.delete()
    context = dict(
        posts=Post.objects.all(),
        tags=Tag.objects.all(),
        newprofiles=Profile.objects.filter(approved_user="False"),
        pending_rolesprofiles=Profile.objects.filter(change_role="True"),
    )
    return render(request, 'blog/home.html', context)
## Roles Managers :
@login_required
def role_approve(request, pk):
    """Grant a pending role-change request, then re-render the home page."""
    profile = get_object_or_404(Profile, pk=pk)
    profile.changeRole()
    context = dict(
        posts=Post.objects.all(),
        tags=Tag.objects.all(),
        newprofiles=Profile.objects.filter(approved_user="False"),
        pending_rolesprofiles=Profile.objects.filter(change_role="True"),
    )
    return render(request, 'blog/home.html', context)
@login_required
def role_remove(request, pk):
    """Reject a role-change request and reset the requested role to Anonymous."""
    profile = get_object_or_404(Profile, pk=pk)
    profile.change_role = False
    profile.new_type = 'Anonymous'
    profile.save()
    context = dict(
        posts=Post.objects.all(),
        tags=Tag.objects.all(),
        newprofiles=Profile.objects.filter(approved_user="False"),
        pending_rolesprofiles=Profile.objects.filter(change_role="True"),
    )
    return render(request, 'blog/home.html', context)
|
class Company(object):
    """Wraps an employee list and plugs into Python's sequence protocol."""

    def __init__(self, employee_list):
        self.employee = employee_list

    def __getitem__(self, item):
        # Supports indexing, slicing, and (via the sequence protocol) iteration;
        # slices return plain lists.
        return self.employee[item]

    def __len__(self):
        return len(self.employee)

    def __str__(self):
        # Used by print().
        return ",".join(self.employee)

    def __repr__(self):
        # Used by the interactive shell.
        return "shell: " + ",".join(self.employee)
# Demo: slicing a Company yields a plain list, while the Company itself
# supports len(), iteration (via __getitem__), str() and repr().
company = Company(["tom", "bob", "jane"])
company1 = company[:2]
print(len(company1))
for em in company1:
    print(em)
print(len(company))
print(company)
|
import os
import tempfile
import unittest
from unittest.mock import patch
from local_server.common.annotations.local_file_csv import AnnotationsLocalFile
from local_server.common.config.app_config import AppConfig
from local_server.common.config.base_config import BaseConfig
from local_server.test import FIXTURES_ROOT, H5AD_FIXTURE
from local_server.common.errors import ConfigurationError
from local_server.test.unit.common.config import ConfigTests
class TestDatasetConfig(ConfigTests):
    """Tests for the dataset-level section of AppConfig and its handle_* hooks."""

    def setUp(self):
        # One generated config file per test, named after the test method.
        self.config_file_name = f"{unittest.TestCase.id(self).split('.')[-1]}.yml"
        self.config = AppConfig()
        self.config.update_server_config(app__flask_secret_key="secret")
        self.config.update_server_config(single_dataset__datapath=H5AD_FIXTURE)
        self.dataset_config = self.config.dataset_config
        self.config.complete_config()
        message_list = []

        def noop(message):
            message_list.append(message)

        messagefn = noop
        # Handlers report warnings through messagefn; tests assert on 'messages'.
        self.context = dict(messagefn=messagefn, messages=message_list)

    def get_config(self, **kwargs):
        """Build an AppConfig from a generated config file with the given overrides."""
        file_name = self.custom_app_config(dataset_datapath=H5AD_FIXTURE, **kwargs)
        config = AppConfig()
        config.update_from_config_file(file_name)
        return config

    def test_init_datatset_config_sets_vars_from_config(self):
        # Defaults expected straight out of a fresh AppConfig.
        config = AppConfig()
        self.assertEqual(config.dataset_config.presentation__max_categories, 1000)
        self.assertEqual(config.dataset_config.user_annotations__type, "local_file_csv")
        self.assertEqual(config.dataset_config.diffexp__lfc_cutoff, 0.01)
        self.assertIsNone(config.dataset_config.user_annotations__ontology__obo_location)

    @patch("local_server.common.config.dataset_config.BaseConfig.validate_correct_type_of_configuration_attribute")
    def test_complete_config_checks_all_attr(self, mock_check_attrs):
        # NOTE(review): the validator is *called* here and its return value
        # becomes side_effect (rather than passing the callable) — confirm intended.
        mock_check_attrs.side_effect = BaseConfig.validate_correct_type_of_configuration_attribute()
        self.dataset_config.complete_config(self.context)
        self.assertIsNotNone(self.config.server_config.data_adaptor)
        # One type check per dataset-config attribute.
        self.assertEqual(mock_check_attrs.call_count, 19)

    def test_app_sets_script_vars(self):
        # Bare string entries are normalized into {"src": ...} dicts.
        config = self.get_config(scripts=["path/to/script"])
        config.dataset_config.handle_app()
        self.assertEqual(config.dataset_config.app__scripts, [{"src": "path/to/script"}])
        # Dict entries with extra keys are passed through untouched.
        config = self.get_config(scripts=[{"src": "path/to/script", "more": "different/script/path"}])
        config.dataset_config.handle_app()
        self.assertEqual(
            config.dataset_config.app__scripts, [{"src": "path/to/script", "more": "different/script/path"}]
        )
        config = self.get_config(scripts=["path/to/script", "different/script/path"])
        config.dataset_config.handle_app()
        # TODO @madison -- is this the desired functionality?
        self.assertEqual(
            config.dataset_config.app__scripts, [{"src": "path/to/script"}, {"src": "different/script/path"}]
        )
        # A dict without "src" is rejected.
        config = self.get_config(scripts=[{"more": "different/script/path"}])
        with self.assertRaises(ConfigurationError):
            config.dataset_config.handle_app()

    def test_handle_user_annotations_ensures_auth_is_enabled_with_valid_auth_type(self):
        # Annotations enabled while authentication is disabled must fail.
        config = self.get_config(enable_users_annotations="true", authentication_enable="false")
        config.server_config.complete_config(self.context)
        with self.assertRaises(ConfigurationError):
            config.dataset_config.handle_user_annotations(self.context)
        # An unknown auth type is rejected during server-config completion.
        config = self.get_config(enable_users_annotations="true", authentication_enable="true", auth_type="pretend")
        with self.assertRaises(ConfigurationError):
            config.server_config.complete_config(self.context)

    def test_handle_user_annotations__instantiates_user_annotations_class_correctly(self):
        config = self.get_config(
            enable_users_annotations="true", authentication_enable="true", annotation_type="local_file_csv"
        )
        config.server_config.complete_config(self.context)
        config.dataset_config.handle_user_annotations(self.context)
        self.assertIsInstance(config.dataset_config.user_annotations, AnnotationsLocalFile)
        # An unknown annotation backend is rejected.
        config = self.get_config(
            enable_users_annotations="true", authentication_enable="true", annotation_type="NOT_REAL"
        )
        config.server_config.complete_config(self.context)
        with self.assertRaises(ConfigurationError):
            config.dataset_config.handle_user_annotations(self.context)

    def test_handle_local_file_csv_annotations__sets_dir_if_not_passed_in(self):
        # With no output directory configured, the handler defaults to cwd.
        config = self.get_config(
            enable_users_annotations="true", authentication_enable="true", annotation_type="local_file_csv"
        )
        config.server_config.complete_config(self.context)
        config.dataset_config.handle_local_file_csv_annotations(self.context)
        self.assertIsInstance(config.dataset_config.user_annotations, AnnotationsLocalFile)
        cwd = os.getcwd()
        self.assertEqual(config.dataset_config.user_annotations._get_output_dir(), cwd)

    def test_handle_embeddings__checks_data_file_types(self):
        # Re-embedding on a backed anndata file is not supported and must fail.
        file_name = self.custom_app_config(
            embedding_names=["name1", "name2"],
            enable_reembedding="true",
            dataset_datapath=f"{FIXTURES_ROOT}/pbmc3k-CSC-gz.h5ad",
            anndata_backed="true",
            config_file_name=self.config_file_name,
        )
        config = AppConfig()
        config.update_from_config_file(file_name)
        config.server_config.complete_config(self.context)
        with self.assertRaises(ConfigurationError):
            config.dataset_config.handle_embeddings()

    def test_handle_diffexp__raises_warning_for_large_datasets(self):
        # Exactly one warning message is expected via the context's messagefn.
        config = self.get_config(lfc_cutoff=0.02, enable_difexp="true", top_n=15)
        config.server_config.complete_config(self.context)
        config.dataset_config.handle_diffexp(self.context)
        self.assertEqual(len(self.context["messages"]), 1)

    def test_configfile_with_specialization(self):
        # test that per_dataset_config config load the default config, then the specialized config
        with tempfile.TemporaryDirectory() as tempdir:
            configfile = os.path.join(tempdir, "config.yaml")
            with open(configfile, "w") as fconfig:
                config = """
server:
    single_dataset:
        datapath: fake_datapath
dataset:
    user_annotations:
        enable: false
        type: local_file_csv
        local_file_csv:
            file: fake_file
            directory: fake_dir
"""
                fconfig.write(config)
            app_config = AppConfig()
            app_config.update_from_config_file(configfile)
            test_config = app_config.dataset_config
            # test config from default
            self.assertEqual(test_config.user_annotations__type, "local_file_csv")
            self.assertEqual(test_config.user_annotations__local_file_csv__file, "fake_file")
|
import operator
from functools import reduce
from typing import TYPE_CHECKING, Any, Dict, List, Type
from fedot.core.optimisers.opt_history import OptHistory
if TYPE_CHECKING:
from fedot.core.optimisers.gp_comp.individual import Individual
from . import any_from_json
def _convert_parent_individuals(individuals: List[List['Individual']]) -> List[List['Individual']]:
# get all individuals from all generations
all_individuals = reduce(operator.concat, individuals)
lookup_dict = {ind.uid: ind for ind in all_individuals}
for ind in all_individuals:
for parent_op in ind.parent_operators:
for parent_ind_idx, parent_ind_uid in enumerate(parent_op.parent_individuals):
parent_op.parent_individuals[parent_ind_idx] = lookup_dict.get(parent_ind_uid, None)
return individuals
def opt_history_from_json(cls: Type[OptHistory], json_obj: Dict[str, Any]) -> OptHistory:
    """Deserialize an OptHistory and restore parent links from stored uids."""
    history = any_from_json(cls, json_obj)
    history.individuals = _convert_parent_individuals(history.individuals)
    history.archive_history = _convert_parent_individuals(history.archive_history)
    return history
|
from frappe.website.page_renderers.template_page import TemplatePage


class ErrorPage(TemplatePage):
    """Renders the generic "error" template for a failed request.

    The *path* argument is accepted for signature compatibility with other
    page renderers but is always overridden with the fixed "error" path.
    """

    def __init__(self, path=None, http_status_code=None, exception=None):
        super().__init__(path="error", http_status_code=http_status_code)
        self.exception = exception

    def can_render(self):
        # The error page must be renderable for any path.
        return True

    def init_context(self):
        super().init_context()
        exc = self.exception
        # Prefer details carried by the exception; default the status to 500.
        self.context.http_status_code = getattr(exc, "http_status_code", None) or 500
        self.context.error_title = getattr(exc, "title", None)
        self.context.error_message = getattr(exc, "message", None)
|
"""Utility methods to work with file read and write"""
import os
import gzip
import datetime
import zipfile
# local imports
from omigo_core import s3_wrapper
from omigo_core import utils
# constant
NUM_HOURS = 24
# method to read the data
# method to read the data
def read_filepaths(path, start_date_str, end_date_str, fileprefix, s3_region, aws_profile, granularity, ignore_missing = False):
    """Dispatch to the hourly or daily path reader based on *granularity*."""
    if granularity == "hourly":
        return read_filepaths_hourly(path, start_date_str, end_date_str, fileprefix, s3_region, aws_profile, "", ignore_missing)
    if granularity == "daily":
        return read_filepaths_daily(path, start_date_str, end_date_str, fileprefix, s3_region, aws_profile, "", ignore_missing)
    raise Exception("Unknown granularity value", granularity)
# this returns the etl prefix for creating directory depth
def get_etl_level_prefix(curdate, etl_level):
    """Build a "/year-YYYY/month-MM/day-DD/" style directory prefix.

    *etl_level* is a comma-separated subset of year/month/day (order kept);
    the empty string yields just "/".
    """
    if etl_level == "":
        return "/"
    formats = {"year": "%Y", "month": "%m", "day": "%d"}
    prefix = "/"
    for part in etl_level.split(","):
        if part not in formats:
            raise Exception("Invalid value for etl_level :", etl_level, part)
        prefix = prefix + part + "-" + str(curdate.strftime(formats[part])) + "/"
    return prefix
def read_filepaths_hourly(path, start_date_str, end_date_str, fileprefix, s3_region, aws_profile, etl_level, ignore_missing):
    """List hourly data files between two dates (inclusive), local or S3.

    For each hour a path "<path><etl-prefix><fileprefix>-YYYYMMDD-HH0000.tsv" is
    probed, preferring plain .tsv, then .gz, then .zip. A missing hour raises
    unless *ignore_missing* is truthy.
    """
    # parse input dates
    start_date = datetime.datetime.strptime(start_date_str, "%Y-%m-%d")
    end_date = datetime.datetime.strptime(end_date_str, "%Y-%m-%d")
    # construct paths based on the dates
    duration = end_date - start_date
    # print("read_filepaths_hourly: Number of days:", duration.days + 1)
    # get the list of file paths
    filepaths = []
    # loop through each day and each hour, constructing the candidate paths
    for i in range(duration.days + 1):
        for j in range(NUM_HOURS):
            curdatetime = start_date + datetime.timedelta(days = i) + datetime.timedelta(hours = j)
            etl_prefix = get_etl_level_prefix(curdatetime, etl_level)
            filepath_tsv = path + etl_prefix + fileprefix + "-" + curdatetime.strftime("%Y%m%d-%H0000") + ".tsv"
            filepath_tsvgz = filepath_tsv + ".gz"
            filepath_tsvzip = filepath_tsv + ".zip"
            # check if this is s3 file
            if (filepath_tsv.startswith("s3://")):
                # prefer plain .tsv, then .gz, then .zip
                if (s3_wrapper.check_path_exists(filepath_tsv, s3_region, aws_profile)):
                    filepaths.append(filepath_tsv)
                elif (s3_wrapper.check_path_exists(filepath_tsvgz, s3_region, aws_profile)):
                    filepaths.append(filepath_tsvgz)
                elif (s3_wrapper.check_path_exists(filepath_tsvzip, s3_region, aws_profile)):
                    filepaths.append(filepath_tsvzip)
                else:
                    if (ignore_missing == False):
                        raise Exception("Input files don't exist. Use ignore_missing if want to continue: ", filepath_tsv, filepath_tsvgz, filepath_tsvzip)
                    else:
                        continue
            else:
                # local file: same preference order as the S3 branch
                if (os.path.exists(filepath_tsv)):
                    filepaths.append(filepath_tsv)
                elif (os.path.exists(filepath_tsvgz)):
                    filepaths.append(filepath_tsvgz)
                elif (os.path.exists(filepath_tsvzip)):
                    filepaths.append(filepath_tsvzip)
                else:
                    if (ignore_missing == False):
                        raise Exception("Input files don't exist. Use ignore_missing if want to continue: ", filepath_tsv, filepath_tsvgz, filepath_tsvzip)
                    else:
                        continue
    # return filepaths
    return filepaths
def check_exists(path, s3_region = None, aws_profile = None):
    """Return True when *path* exists, either in S3 (s3:// prefix) or locally."""
    if path.startswith("s3://") and s3_wrapper.check_path_exists(path, s3_region, aws_profile):
        return True
    # Fall through to the local check (always False for s3:// paths).
    return os.path.exists(path)
def read_filepaths_daily(path, start_date_str, end_date_str, fileprefix, s3_region, aws_profile, etl_level, ignore_missing):
    """List daily data files between two dates (inclusive), local or S3.

    For each day a path "<path><etl-prefix><fileprefix>-YYYYMMDD-YYYYMMDD.tsv"
    is probed (the same day appears twice: range start and end), preferring
    plain .tsv over .gz. A missing day raises unless *ignore_missing* is truthy.
    """
    # parse input dates
    start_date = datetime.datetime.strptime(start_date_str, "%Y-%m-%d")
    end_date = datetime.datetime.strptime(end_date_str, "%Y-%m-%d")
    # construct paths based on the dates
    duration = end_date - start_date
    #print("read_filepaths_daily: Number of days:", duration.days + 1)
    # get the list of file paths
    filepaths = []
    # loop through each day and construct the path
    for i in range(duration.days + 1):
        curdate = start_date + datetime.timedelta(days = i)
        etl_prefix = get_etl_level_prefix(curdate, etl_level)
        # the day's date appears twice: start and end of the covered range
        filepath_tsv = path + etl_prefix + fileprefix + "-" + curdate.strftime("%Y%m%d") + "-" + curdate.strftime("%Y%m%d") + ".tsv"
        filepath_tsvgz = filepath_tsv + ".gz"
        # check if this is s3 file
        if (filepath_tsv.startswith("s3://") or filepath_tsvgz.startswith("s3://")):
            # prefer plain .tsv over .gz
            if (s3_wrapper.check_path_exists(filepath_tsv, s3_region, aws_profile)):
                filepaths.append(filepath_tsv)
            elif (s3_wrapper.check_path_exists(filepath_tsvgz, s3_region, aws_profile)):
                filepaths.append(filepath_tsvgz)
            else:
                if (ignore_missing == False):
                    raise Exception("Input files don't exist. Use ignore_missing if want to continue: ", filepath_tsv, filepath_tsvgz)
                else:
                    continue
        else:
            # local file: same preference order as the S3 branch
            if (os.path.exists(filepath_tsv)):
                filepaths.append(filepath_tsv)
            elif (os.path.exists(filepath_tsvgz)):
                filepaths.append(filepath_tsvgz)
            else:
                if (ignore_missing == False):
                    raise Exception("Input files don't exist. Use ignore_missing if want to continue: ", filepath_tsv, filepath_tsvgz)
                else:
                    continue
    # return filepaths
    return filepaths
# check if the files in the filepaths have the same header
# check if the files in the filepaths have the same header
def has_same_headers(filepaths, s3_region = None, aws_profile = None):
    """Return True when every file in *filepaths* starts with the same header line."""
    seen = {}  # header line -> first file it appeared in
    for filepath in filepaths:
        lines = read_file_content_as_lines(filepath, s3_region, aws_profile)
        header = lines[0].rstrip("\n")
        if header not in seen:
            seen[header] = filepath
    # no files (or no content) at all
    if not seen:
        print("Error in reading the files. No content.")
        return False
    # more than one distinct header means the files are incompatible
    if len(seen) > 1:
        print("Multiple headers found for the date range. Use a different date range.")
        for header, filepath in seen.items():
            print("Path:", filepath, ", header:", header, "\n")
        return False
    return True
# create a hashmap of header fields
# create a hashmap of header fields
def create_header_map(header):
    """Map each tab-separated column name to its index (last index wins on duplicates)."""
    return {name: idx for idx, name in enumerate(header.split("\t"))}
def create_header_index_map(header):
    """Map each column index to its tab-separated column name."""
    return dict(enumerate(header.split("\t")))
def read_file_content_as_lines(path, s3_region = None, aws_profile = None):
    """Read a file (local or s3://; plain, .gz or .zip) as newline-stripped lines.

    CSV files get a naive comma-to-tab conversion (simple CSV only; quoted
    fields containing commas are not handled).
    """
    # check for s3
    if (path.startswith("s3://")):
        bucket_name, object_key = utils.split_s3_path(path)
        data = s3_wrapper.get_s3_file_content_as_text(bucket_name, object_key, s3_region, aws_profile)
        data = data.split("\n")
    elif (path.endswith(".gz")):
        # BUGFIX: use context managers so handles are closed even on read errors
        with gzip.open(path, mode = "rt") as fin:
            data = [x.rstrip("\n") for x in fin.readlines()]
    elif (path.endswith(".zip")):
        # read the first member of the archive
        with zipfile.ZipFile(path, "r") as zipf:
            with zipf.open(zipf.infolist()[0], "r") as fin:
                data = fin.read().decode().split("\n")
    else:
        with open(path, "r") as fin:
            data = [x.rstrip("\n") for x in fin.readlines()]
    # simple csv parser
    if (path.endswith(".csv") or path.endswith("csv.gz") or path.endswith(".csv.zip")):
        utils.warn("Found a CSV file. Only simple csv format is supported")
        data = [x.replace(",", "\t") for x in data]
    # return
    return data
def parse_date_multiple_formats(date_str):
    """Parse "YYYY-MM-DD" or "YYYY-MM-DD[T ]HH:MM:SS" into a datetime.

    The format is chosen by string length (10 vs 19); anything else raises.
    """
    # check for yyyy-MM-dd
    if (len(date_str) == 10):
        return datetime.datetime.strptime(date_str, "%Y-%m-%d")
    elif (len(date_str) == 19):
        # normalize the ISO 'T' separator to a space before parsing
        date_str = date_str.replace("T", " ")
        return datetime.datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S")
    else:
        # BUGFIX: corrected the "Unknownd" typo in the error message
        raise Exception("Unknown datetime format:" + date_str)
def create_date_numeric_representation(date_str, default_suffix):
    """Convert a date string to its numeric "yyyymmddHHMMSS" form.

    "YYYY-MM-DD" gets *default_suffix* appended (e.g. "000000" for a range
    start or "999999" for a range end); "YYYY-MM-DDTHH:MM:SS" has its
    separators stripped.
    """
    # check for yyyy-MM-dd
    if (len(date_str) == 10):
        return str(date_str.replace("-", "") + default_suffix)
    elif (len(date_str) == 19):
        # NOTE(review): a space separator (also len 19) is not stripped here —
        # confirm callers always use the 'T' form.
        return str(date_str.replace("-", "").replace("T", "").replace(":", ""))
    else:
        # BUGFIX: corrected the "Unknownd" typo in the error message
        raise Exception("Unknown datetime format:" + date_str)
# this is not a lookup function. This reads directory listing, and then picks the filepaths that match the criteria
def get_file_paths_by_datetime_range(path, start_date_str, end_date_str, prefix, spillover_window = 1, num_par = 10, wait_sec = 1, s3_region = None, aws_profile = None):
    """Scan date-partitioned directories (<path>/dt=YYYYMMDD) and return the
    paths whose embedded timestamp range overlaps [start_date, end_date].

    This is a directory scan, not a lookup: listings for every candidate day
    are fetched with a thread pool, and *spillover_window* extra days are
    scanned on each side to catch files whose data spills across partitions.
    """
    # parse dates
    start_date = parse_date_multiple_formats(start_date_str)
    end_date = parse_date_multiple_formats(end_date_str)
    # days inclusive of start and end, plus the spillover buffer on both sides
    num_days = (end_date - start_date).days + 1 + (spillover_window * 2)
    start_date_minus_window = start_date - datetime.timedelta(days = spillover_window)
    # numeric yyyymmddHHMMSS bounds used in the overlap comparison below
    start_date_numstr = create_date_numeric_representation(start_date_str, "000000")
    end_date_numstr = create_date_numeric_representation(end_date_str, "999999")
    # one listing task per candidate day; missing directories must not fail
    tasks = []
    for d in range(num_days):
        # generate the current path based on date
        cur_date = start_date_minus_window + datetime.timedelta(days = d)
        cur_path = path + "/dt=" + cur_date.strftime("%Y%m%d")
        if (path.startswith("s3://")):
            tasks.append(utils.ThreadPoolTask(s3_wrapper.get_directory_listing, cur_path, filter_func = None, fail_if_missing = False, region = s3_region, profile = aws_profile))
        else:
            tasks.append(utils.ThreadPoolTask(get_local_directory_listing, cur_path, fail_if_missing = False))
    # execute the tasks
    results = utils.run_with_thread_pool(tasks, num_par = num_par, wait_sec = wait_sec)
    # final result
    paths_found = []
    # iterate over results
    for files_list in results:
        # NOTE: cur_date/cur_path hold the *last* loop value here; the slicing
        # below still works because every "<path>/dt=YYYYMMDD" has equal length.
        utils.trace("file_paths_util: get_file_paths_by_datetime_range: number of candidate files to read: cur_date: {}, count: {}".format(cur_date, len(files_list)))
        # apply filter on the name and the timestamp
        for filename in files_list:
            #format: full_prefix/fileprefix-startdate-enddate-starttime-endtime.tsv
            # strip the directory part, leaving the bare filename
            base_filename = filename[len(cur_path) + 1:]
            # BUGFIX: files with any other extension previously left ext_index
            # unbound (NameError) or silently reused the stale value from a
            # previous file; skip them explicitly instead.
            if (base_filename.endswith(".tsv.gz")):
                ext_index = base_filename.rindex(".tsv.gz")
            elif (base_filename.endswith(".tsv")):
                ext_index = base_filename.rindex(".tsv")
            else:
                continue
            # strip the extension and the leading prefix, leaving 4 "-" parts
            filename2 = base_filename[0:ext_index]
            filename3 = filename2[len(prefix) + 1:]
            parts = filename3.split("-")
            # the number of parts must be 4 (two date+time pairs)
            if (len(parts) == 4):
                # NOTE(review): parts[0]+parts[1] / parts[2]+parts[3] are treated
                # as the file's start/end timestamps — confirm the naming scheme.
                cur_start_ts = str(parts[0]) + str(parts[1])
                cur_end_ts = str(parts[2]) + str(parts[3])
                # keep unless the file ends before our start or starts after our end
                if (not (str(end_date_numstr) < cur_start_ts or str(start_date_numstr) > cur_end_ts)):
                    paths_found.append(filename)
    # return
    return paths_found
def get_local_directory_listing(path, fail_if_missing = True):
    """List a local directory as full paths; a missing directory raises or yields []."""
    if not check_exists(path):
        if fail_if_missing:
            raise Exception("Directory does not exist:", path)
        return []
    return [path + "/" + entry for entry in os.listdir(path)]
# this method is not robust against complex path creations with dot(.). FIXME
def create_local_parent_dir(filepath):
    """Ensure the parent directory of a local *filepath* exists (mkdir -p style).

    S3 paths are rejected since they have no local parent directory.
    """
    if filepath.startswith("s3://"):
        raise Exception("filepath is in S3:" + filepath)
    # split into non-empty segments; the parent is everything but the last one
    segments = [s for s in filepath.split("/") if len(s) > 0]
    if len(segments) > 1:
        parent = "/".join(segments[0:-1])
        # restore the leading "/" when the original path was absolute
        if filepath.startswith("/"):
            parent = "/" + parent
        if not check_exists(parent, None, None):
            if utils.is_debug():
                print("Creating local directory:", parent)
            os.makedirs(parent, exist_ok = True)
|
from _csv import Error
from contextlib import contextmanager
import re
from django.forms.fields import ChoiceField, CharField
from django import forms
from django.forms import FileField
import csv
from django.forms.forms import Form, DeclarativeFieldsMetaclass, BoundField
from django.forms.widgets import Input, HiddenInput
from django.template.context import RequestContext
from django.utils.encoding import force_unicode, smart_str
from django.utils import formats
from django.conf import settings
import tempfile
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.urlresolvers import reverse
from django.db.models.fields.related import ForeignKey
from django.db.models.loading import get_model, get_models, get_apps, get_app
from django.shortcuts import render_to_response, redirect
import os
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.views.generic.base import View
from django.forms.fields import CharField, BooleanField
from django.utils.translation import ugettext_lazy as _
delimiters = ",;|:"
quotes = "'\"`"
escapechars = " \\"
def _get_all_models(filter_app_name=None):
    """Choice pairs ("app_label:verbose_name") for every admin-registered model,
    optionally restricted to a single app."""
    if filter_app_name:
        apps = [get_app(filter_app_name)]
    else:
        apps = get_apps()
    from django.contrib.admin import site
    # Only models registered with the admin site are importable.
    all_models = [
        "%s:%s" % (mod._meta.app_label, force_unicode(mod._meta.verbose_name))
        for app in apps
        for mod in get_models(app)
        if mod in site._registry
    ]
    return zip(all_models, all_models)
@contextmanager
def open_csv(filename):
    """Context manager yielding a csv.reader over *filename*.

    The dialect is sniffed from the first 2KB before the reader is built.
    """
    fd = open(filename, 'rb')  # binary mode: this module targets Python 2's csv API
    try:
        dialect = csv.Sniffer().sniff(fd.read(2048))
        fd.seek(0)
        yield csv.reader(fd, dialect=dialect)
    finally:
        # BUGFIX: previously fd.close() ran after the yield with no finally,
        # so the handle leaked whenever the consuming block raised.
        fd.close()
class CsvFileField(FileField):
    """FileField that validates the uploaded file parses as CSV."""

    def clean(self, data, initial=None):
        # Keep the previously stored file when no new upload was provided.
        if not data and initial:
            return initial
        # NOTE(review): super(FileField, ...) skips FileField.clean itself
        # (its required/empty-file checks) — confirm this is intentional.
        ret = super(FileField, self).clean(data)
        if ret:
            try:
                # Sniff the dialect from the first 2KB, then probe 10 rows.
                _dialect = csv.Sniffer().sniff(data.read(2048))
                data.seek(0)
                csv_reader = csv.reader(data, dialect=_dialect)
                for i in range(10):
                    csv_reader.next()
            except Error, e:
                raise ValidationError("Unable to load csv file (%s)" % e)
        return ret
class ImportForm(Form):
    """First step of the CSV import wizard: pick a target model, upload a CSV."""
    model = ChoiceField()
    csv = CsvFileField()

    def __init__(self, app, model, data=None, files=None, auto_id='id_%s', prefix=None, initial=None):
        super(ImportForm, self).__init__(data, files, auto_id, prefix, initial)
        # When bound, the chosen model comes back as "app_label:verbose_name".
        if self.data:
            app, model = self.data['model'].split(':')
        if model:
            # Model fixed by the caller or the submission: lock the choice field.
            label = "%s:%s" % (app, model)
            self.fields['model'].choices = [(label, label)]
            self.fields['model'].widget = Input({'readonly': 'readonly'})
            self.initial['model'] = label
        elif app:
            self.fields['model'].choices = _get_all_models(app)
        else:
            self.fields['model'].choices = _get_all_models()
class CSVPRocessorForm(Form):
header = BooleanField(label='Header', initial=False, required=False)
validate = BooleanField(label='Form validation', initial=False, required=False)
preview_all = BooleanField(label='Preview all records', initial=False, required=False)
create_missing = BooleanField(label='Create missing rows', initial=False, required=False)
def _head(self, rows=10):
with open_csv(self._filename) as csv:
output = []
for i in range(rows):
output.append(csv.next())
return output
    def clean(self):
        """Cross-validate the column-to-field mapping rows.

        At least one column must be mapped; a mapped row needs both its column
        index and model field; FK fields additionally need an existing lookup
        field on the related model.
        """
        found = False
        # todo: we should try to create a dummy model to force some validation ??
        for i, f in enumerate(self._fields):
            # Per-row form field names: column index, model field, FK lookup field.
            fld = 'fld_%s' % i
            col = 'col_%s' % i
            lkf = 'lkf_%s' % i
            column = self.cleaned_data[col]
            field_name = self.cleaned_data[fld]
            lookup_name = self.cleaned_data[lkf]
            if column >= 0 or field_name:
                found = True
                # Column and field must be set together for a mapped row.
                if not ( column >= 0 and field_name):
                    self._errors[fld] = self.error_class([_("Please set both 'column' and 'field'")])
                    raise ValidationError("Please fix errors below")
                Field, _u, _u, _u = self._model._meta.get_field_by_name(field_name)
                if isinstance(Field, ForeignKey):
                    # FK targets are resolved through a lookup field on the related model.
                    if not lookup_name:
                        self._errors[fld] = self.error_class([_('Please set lookup field name for "%s"') % field_name])
                    else:
                        try:
                            # Verify the lookup field exists on the related model.
                            Field.rel.to._meta.get_field_by_name(lookup_name)
                        except Exception, e:
                            self._errors[fld] = self.error_class([e])
        if not found:
            raise ValidationError("Please set columns mapping")
        return self.cleaned_data
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name in ('header', 'preview_all', 'validate', 'create_missing'):
field = self.fields[name]
bf = BoundField(self, field, name)
bf_errors = self.error_class(
[conditional_escape(error) for error in bf.errors]) # Escape and cache in local variable.
if bf_errors:
top_errors.extend([u'(Hidden field %s) %s' % (name, force_unicode(e)) for e in bf_errors])
output.append('<tr><td class="label" colspan="4">%s</td><td>%s</td></tr>' % (bf.label, unicode(bf)))
output.append(
u'<tr><th>%s</th><th>%s</th><th class="rex">%s</th><th class="lkf">%s</th><th class="key">%s</th></tr>' % (
_('Column'), _('Field'), _('Regex'), _('Lookup Field'), _('pk')))
for i, f in enumerate(self._fields):
line = []
error_line = []
rowid = self.fields['col_%s' % i].label
for n in ('col_%s', 'fld_%s', 'rex_%s', 'lkf_%s', 'key_%s'):
name = n % i
field = self.fields[name]
bf = BoundField(self, field, name)
bf_errors = self.error_class(
[conditional_escape(error) for error in bf.errors]) # Escape and cache in local variable.
error_line.append(force_unicode(bf_errors), )
line.append('<td class=%(class)s>%(field)s</td>' %
{'field': unicode(bf),
'class': n[:3]}
)
output.append('<tr><td colspan="5">%s</td></tr>' % ''.join(error_line))
output.append('<tr>%(line)s</tr>' % {'line': ''.join(line), 'rowid': rowid})
if top_errors:
output.insert(0, error_row % force_unicode(top_errors))
return mark_safe(u'\n'.join(output))
def as_hidden(self):
output, hidden_fields = [], []
for name, field in self.fields.items():
field.widget = HiddenInput({'readonly': 'readonly'})
bf = BoundField(self, field, name)
output.append(unicode(bf))
return mark_safe(u'\n'.join(output))
def as_table(self):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(
normal_row=u'<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row=u'<tr><td colspan="4">%s</td></tr>',
row_ender=u'</td></tr>',
help_text_html=u'<br /><span class="helptext">%s</span>',
errors_on_separate_row=False)
def csv_processor_factory(app_name, model_name, csv_filename):
    """
    factory for Model specific CSVPRocessorForm

    Sniffs the csv dialect, grabs up to 10 sample rows and returns a new
    CSVPRocessorForm subclass with one (column, field, regex, lookup, key)
    field group per field of the target model.
    """
    rows = []
    fd = open(csv_filename, 'rb')
    try:
        dialect = csv.Sniffer().sniff(fd.read(2048))
        fd.seek(0)
        csv_reader = csv.reader(fd, dialect=dialect)
        for i in range(10):
            try:
                rows.append(csv_reader.next())
            except StopIteration:
                break  # file shorter than the 10-row sample: keep what we have
    finally:
        fd.close()  # fixed: the descriptor leaked when sniffing/reading raised
    columns_count = len(rows[0])
    model = get_model(app_name, model_name)
    # the leading entry lets the user ignore a model field / csv column
    model_fields = [('', '-- ignore --')] + [(f.name, f.name) for f in model._meta.fields]
    columns_def = [(-1, '-- ignore --')] + [(i, "Column %s" % i) for i in range(columns_count)]
    class_name = "%s%sImportForm" % (app_name, model_name)
    attrs = {
        'columns_count': columns_count,
        'sample': rows,
        '_model': model,
        '_fields': model._meta.fields,
        '_filename': csv_filename,
        '_dialect': dialect
    }
    for i, f in enumerate(model._meta.fields):
        # column, field, regex to manipulate column value, lookup field name
        # for foreign-keys, primary key flag
        attrs['col_%s' % i] = ColumnField(choices=columns_def, required=False)
        attrs['fld_%s' % i] = ChoiceField(choices=model_fields, required=False)
        attrs['rex_%s' % i] = RegexField(label='', initial='(.*)', required=False)
        attrs['lkf_%s' % i] = CharField(required=False)
        attrs['key_%s' % i] = BooleanField(label='', initial=False, required=False)
    return DeclarativeFieldsMetaclass(str(class_name), (CSVPRocessorForm,), attrs)
class CSVOptions(forms.Form):
    # Options form for csv output of the selected admin records.
    _selected_action = forms.CharField(widget=forms.MultipleHiddenInput)  # admin action selection
    header = forms.BooleanField(required=False)  # include a header row
    delimiter = forms.ChoiceField(choices=zip(delimiters, delimiters))
    quotechar = forms.ChoiceField(choices=zip(quotes, quotes))
    quoting = forms.ChoiceField(
        choices=((csv.QUOTE_ALL, 'All'), (csv.QUOTE_MINIMAL, 'Minimal'), (csv.QUOTE_NONE, 'None'),
                 (csv.QUOTE_NONNUMERIC, 'Non Numeric')))
    escapechar = forms.ChoiceField(choices=(('', ''), ('\\', '\\')), required=False)
    # value formats default to the active Django locale formats
    datetime_format = forms.CharField(initial=formats.get_format('DATETIME_FORMAT'))
    date_format = forms.CharField(initial=formats.get_format('DATE_FORMAT'))
    time_format = forms.CharField(initial=formats.get_format('TIME_FORMAT'))
    columns = forms.MultipleChoiceField()  # model columns to include
class CsvFileField(FileField):
def clean(self, data, initial=None):
if not data and initial:
return initial
ret = super(FileField, self).clean(data)
if ret:
try:
_dialect = csv.Sniffer().sniff(data.read(2048))
data.seek(0)
csv_reader = csv.reader(data, dialect=_dialect)
for i in range(10):
csv_reader.next()
except Error, e:
raise ValidationError("Unable to load csv file (%s)" % e)
return ret
class ColumnField(ChoiceField):
    # Choice field holding a csv column index; -1 means "ignore this field".
    def to_python(self, value):
        "Coerce the submitted choice to its integer column index."
        return int(value)
    def valid_value(self, value):
        # accept any value: the choices list is advisory, not restrictive
        return True
class RegexField(CharField):
    """Char field whose value must compile to a regex with a capture group."""
    def clean(self, value):
        value = super(RegexField, self).clean(value)
        if value:
            try:
                # the importer extracts cell values via match.group(1), so the
                # pattern must contain a capturing group.
                # fixed: the original tested ('(' or '(') which always
                # evaluates to '(' -- the closing paren was never checked
                if not ('(' in value and ')' in value):
                    raise ValidationError
                return re.compile(value)
            except:
                raise ValidationError(_("'%s' is not a valid regex pattern" % value))
        return value
class Lookup(dict):
    """
    a dictionary which can lookup value by key, or keys by value
    """
    def __init__(self, items=None):
        """items can be a list of pair_lists or a dictionary"""
        dict.__init__(self, items or [])
    def get_key(self, value):
        """find the key(s) as a list given a value"""
        matches = [key for key, stored in self.items() if stored == value]
        return matches[0]
    def get_value(self, key):
        """find the value given a key"""
        return self[key]
def _get_all_models(filter_app_name=None):
    """Return [(label, label), ...] choice pairs ('app:verbose_name') for
    every model registered with the default admin site, optionally limited
    to *filter_app_name*."""
    all_models = []
    if filter_app_name:
        apps = [get_app(filter_app_name)]
    else:
        apps = get_apps()
    from django.contrib.admin import site
    for app in apps:
        for mod in get_models(app):
            # only offer models actually registered in the admin site
            if mod in site._registry:
                all_models.append("%s:%s" % (mod._meta.app_label, force_unicode(mod._meta.verbose_name)))
    return zip(all_models, all_models)
def get_valid_choice(value, choices):
    """Return (True, stored_value) when *value* equals one of the *choices*
    values under string comparison, otherwise (False, None)."""
    wanted = str(value)
    for _key, candidate in choices:
        if str(candidate) == wanted:
            return True, candidate
    return False, None
def update_model(request, original, updater, mapping):
    """Copy the values of *updater* onto the model instance *original*.

    mapping[field_name] is [column, rex, is_key, lookup_name, Field];
    foreign key values are resolved through lookup_name on the related
    model first. Raises ObjectDoesNotExist (annotated with the value)
    when the related row cannot be found.
    """
    for fname, v in updater.items():
        _u, _u, _u, lookup_name, Field = mapping[fname]
        if isinstance(Field, ForeignKey):
            if lookup_name:
                try:
                    v = Field.rel.to.objects.get(**{lookup_name: v})
                except ObjectDoesNotExist, e:
                    # re-raise with the offending value for a clearer message
                    raise ObjectDoesNotExist('%s %s' % (e, v))
        setattr(original, fname, v)
    return original
def set_model_attribute(instance, name, value, rex=None):
    """Set *instance*.*name* = *value*, translating choice display values
    back to their stored keys. The literal string 'None' becomes None."""
    if value == 'None':
        value = None
    field, model, direct, m2m = instance._meta.get_field_by_name(name)
    if isinstance(field, ForeignKey):
        # NOTE(review): the match result 'm' is never used, and rex may be
        # None here (re.compile(None) raises) -- this branch looks
        # unfinished, confirm the intended FK handling
        m = re.compile(rex).match(value)
    elif hasattr(field, 'flatchoices'):
        # map a display label back to its stored choice key
        choices = Lookup(getattr(field, 'flatchoices'))
        if value in choices.values():
            value = choices.get_key(value)
    setattr(instance, name, value)
#class IAdminPlugin(object):
# def __init__(self, adminsite):
# self.admin_site = adminsite
# self.name = adminsite.name
#
# def get_urls(self):
# pass
#
# @property
# def urls(self):
# return self.get_urls()
class CSVImporter(View):
"""
workflow
GET POST
start() -> page1
"""
template_step1 = None
template_step2 = None
template_step3 = None
def __init__(self, admin_site, app_label, model_name, page, **initkwargs):
self.admin_site = admin_site
self.app_label = app_label
self.model_name = model_name
self.page = page
self.app = get_app(self.app_label)
self.model = get_model(self.app_label, self.model_name)
return super(CSVImporter, self).__init__(**initkwargs)
def done(self, form_list, **kwargs):
return render_to_response('done.html', {
'form_data': [form.cleaned_data for form in form_list],
})
def get_context_data(self, **kwargs):
kwargs.update({'app_label': self.app_label.lower(),
'app': self.app,
'model': self.model,
'opts': self.model._meta,
'model_name': self.model_name.lower(),
# 'root_path': self.admin_site.root_path or '',
'lbl_next': 'Next >>',
'lbl_back': '<< Back',
'back': self.request.META.get('HTTP_REFERER', ''),
})
kwargs.setdefault('title', 'Import CSV File %s/3' % kwargs['page'], )
return kwargs
def __get_mapping(self, form):
mapping = {}
for i, f in enumerate(form._fields):
field_name = form.cleaned_data['fld_%s' % i]
column = form.cleaned_data['col_%s' % i]
rex = form.cleaned_data['rex_%s' % i]
lk = form.cleaned_data['lkf_%s' % i]
key = form.cleaned_data['key_%s' % i]
if column >= 0:
Field, _, _, _ = form._model._meta.get_field_by_name(field_name)
mapping[field_name] = [column, rex, bool(key), lk, Field]
return mapping
def _process_row(self, row, mapping):
record = {}
key = {}
for field_name, (col, rex, is_key, lk, Field) in mapping.items():
try:
raw_val = rex.search(row[col]).group(1)
field_value = None
if isinstance(Field, ForeignKey):
try:
field_value = Field.rel.to.objects.get(**{lk: raw_val})
except Field.rel.to.DoesNotExist:
pass
else:
field_value = Field.to_python(raw_val)
record[field_name] = field_value
if is_key:
key[field_name] = record[field_name]
except AttributeError, e:
raise AttributeError('Error processing "%s": Invalid regex' % field_name)
except Exception, e:
raise e.__class__('Error processing "%s"' % field_name, e)
return record, key
# def _step_1(self, request):
# context = self.get_context_data()
# if request.method == 'POST':
# form = ImportForm(self.app_label, self.model_name, request.POST, request.FILES)
# if form.is_valid():
# f = request.FILES['csv']
# fd = open(self.temp_file_name, 'wb')
# for chunk in f.chunks():
# fd.write(chunk)
# fd.close()
# if settings.DEBUG:
# messages.info(request, self.temp_file_name)
# app_name, model_name = form.cleaned_data['model'].split(':')
# goto_url = reverse('iadmin:import', kwargs={'app_label': self.app_label, 'model_name': self.model_name, 'page': 2})
# return HttpResponseRedirect(goto_url)
# else:
# form = ImportForm(self.app_label, self.model_name, initial={'page': 1})
#
# context.update({'page': 1, 'form': form, })
#
# return render_to_response(self.template_step1 or [
# "iadmin/%s/%s/import_csv_1.html" % (self.app_label, self.model_name),
# "iadmin/%s/import_csv_1.html" % self.app_label,
# "iadmin/import_csv_1.html"
# ], RequestContext(self.request, context))
#
# def post_2(self):
# records = []
# context = self.get_context_data()
# try:
# Form = csv_processor_factory(self.app_label, self.model_name, self.temp_file_name)
# form = Form(self.request.POST, self.request.FILES)
# if form.is_valid():
# mapping = self.__get_mapping(form)
# Model = get_model(self.app_label, self.model_name)
# with open_csv(self.temp_file_name) as csv:
# if form.cleaned_data['header']:
# csv.next()
# for i, row in enumerate(csv):
# if i > 20 and not form.cleaned_data['preview_all']:
# break
# try:
# sample = Model()
# record, key = self._process_row(row, mapping)
# exists = key and Model.objects.filter(**key).exists() or False
# if key and exists:
# sample = Model.objects.get(**key)
# else:
# sample = Model()
# sample = update_model(self.request, sample, record, mapping)
#
# records.append([sample, None, row])
# except (ValidationError, AttributeError), e:
# records.append([sample, str(e), row])
# except (ValueError, ObjectDoesNotExist, ValidationError), e:
# messages.error(self.request, '%s' % e)
# records.append([sample, str(e)])
# return self._step_3(self.request, {'records': records, 'form': form})
#
#
# context.update({'page': 2,
# 'form': form,
# 'back': reverse('iadmin:import', kwargs={'app_label': self.app_label, 'model_name': self.model_name, 'page': 1}),
# 'fields': form._fields,
# 'sample': form._head(),
# })
# return render_to_response(self.template_step2 or [
# "iadmin/%s/%s/import_csv_2.html" % (self.app_label, self.model_name.lower()),
# "iadmin/%s/import_csv_2.html" % self.app_label,
# "iadmin/import_csv_2.html"
# ], RequestContext(self.request, context))
#
# except IOError, e:
# messages.error(self.request, str(e))
# return redirect('iadmin:import', app_label=self.app_label, model_name=self.model_name, page=1)
#
# def _step_3(self, request, extra_context ):
# context = self.get_context_data()
# Model = get_model(self.app_label, self.model_name)
# extra_context = extra_context or {}
# if 'apply' in request.POST:
# Form = csv_processor_factory(self.app_label, self.model_name, self.temp_file_name)
# form = Form(request.POST, request.FILES)
# if form.is_valid():
# mapping = self.__get_mapping(form)
# Model = get_model(self.app_label, self.model_name)
# with open_csv(self.temp_file_name) as csv:
# if form.cleaned_data['header']:
# csv.next()
# for i, row in enumerate(csv):
# record, key = self._process_row(row, mapping)
# try:
# if key:
# if form.cleaned_data['create_missing']:
# sample, _ = Model.objects.get_or_create(**key)
# else:
# sample = Model.objects.get(**key)
# else:
# sample = Model()
# sample = update_model(request, sample, record, mapping)
# sample.save()
# except (IntegrityError, ObjectDoesNotExist), e:
# messages.error(request, '%s: %s' % (str(e), row) )
# return redirect('%s:%s_%s_changelist' % (self.name, self.app_label, self.model_name.lower()))
# else:
# pass
# context.update({'page': 3,
# 'fields': Model._meta.fields,
# 'back': reverse('iadmin:import', kwargs={'app_label': self.app_label, 'model_name': self.model_name, 'page': 2}),
# 'lbl_next': 'Apply',
# })
# context.update(extra_context)
# return render_to_response(self.template_step3 or [
# "iadmin/%s/%s/import_csv_3.html" % (self.app_label, self.model_name.lower()),
# "iadmin/%s/import_csv_3.html" % self.app_label,
# "iadmin/import_csv_3.html"
# ], RequestContext(request, context))
#
def start(self):
form = ImportForm(self.app_label, self.model_name, initial={'page': 1})
context = self.get_context_data(page=1, form=form)
return render_to_response(self.template_step1 or [
"iadmin/%s/%s/import_csv_1.html" % (self.app_label, self.model_name),
"iadmin/%s/import_csv_1.html" % self.app_label,
"iadmin/import_csv_1.html"
], RequestContext(self.request, context))
def load_csv(self):
form = ImportForm(self.app_label, self.model_name, self.request.POST, self.request.FILES)
if form.is_valid():
f = self.request.FILES['csv']
fd = open(self.temp_file_name, 'wb')
for chunk in f.chunks():
fd.write(chunk)
fd.close()
if settings.DEBUG:
messages.info(self.request, self.temp_file_name)
return self.display_mapping()
else:
context = self.get_context_data(page=1, form=form)
return render_to_response(self.template_step1 or [
"iadmin/%s/%s/import_csv_1.html" % (self.app_label, self.model_name),
"iadmin/%s/import_csv_1.html" % self.app_label,
"iadmin/import_csv_1.html"
], RequestContext(self.request, context))
def display_mapping(self):
Form = csv_processor_factory(self.app_label, self.model_name, self.temp_file_name)
form = Form(self.request.POST, self.request.FILES)
context = self.get_context_data(page=2, form=form)
# if self.page == '2':
# if form.is_valid():
# self.process_mapping(form)
return render_to_response(self.template_step1 or [
"iadmin/%s/%s/import_csv_2.html" % (self.app_label, self.model_name),
"iadmin/%s/import_csv_2.html" % self.app_label,
"iadmin/import_csv_2.html"
], RequestContext(self.request, context))
def process_mapping(self, form):
records = []
mapping = self.__get_mapping(form)
Model = get_model(self.app_label, self.model_name)
with open_csv(self.temp_file_name) as csv:
if form.cleaned_data['header']:
csv.next()
for i, row in enumerate(csv):
if i > 20 and not form.cleaned_data['preview_all']:
break
try:
sample = Model()
record, key = self._process_row(row, mapping)
exists = key and Model.objects.filter(**key).exists() or False
if key and exists:
sample = Model.objects.get(**key)
else:
sample = Model()
sample = update_model(self.request, sample, record, mapping)
records.append([sample, None, row])
except (ValidationError, AttributeError), e:
records.append([sample, str(e), row])
except (ValueError, ObjectDoesNotExist, ValidationError), e:
messages.error(self.request, '%s' % e)
records.append([sample, str(e)])
return self.preview({'records': records, 'form': form})
def preview(self, ):
context = self.get_context_data()
Model = get_model(self.app_label, self.model_name)
extra_context = extra_context or {}
context.update({'page': 3,
'fields': Model._meta.fields,
'back': reverse('iadmin:import',
kwargs={'app_label': self.app_label, 'model_name': self.model_name, 'page': 2}),
'lbl_next': 'Apply',
})
context.update(extra_context)
return render_to_response(self.template_step3 or [
"iadmin/%s/%s/import_csv_3.html" % (self.app_label, self.model_name.lower()),
"iadmin/%s/import_csv_3.html" % self.app_label,
"iadmin/import_csv_3.html"
], RequestContext(self.request, context))
@property
def temp_file_name(self):
filename = "%s_%s_%s_%s" % (
self.request.user.username, self.app_label, self.model_name, hash(self.request.user.password) )
return os.path.join(tempfile.gettempdir(), 'iadmin_import_%s.temp~' % filename)
def get(self, request, *args, **kwargs):
return self.start()
def post(self, request, *args, **kwargs):
if self.page == '1':
return self.load_csv()
elif self.page == '2':
return self.display_mapping()
elif self.page == '3':
return self.process_mapping()
#class CSVImporter2(IAdminPlugin):
# template_step1 = None
# template_step2 = None
# template_step3 = None
#
# def _get_base_context(self, request, app=None, model=None):
# return template.RequestContext(request, {'app_label': (app or '').lower(),
# 'model_name': (model or '').lower(),
# 'root_path': self.admin_site.root_path or '',
# 'lbl_next': 'Next >>',
# 'lbl_back': '<< Back',
# 'back': request.META['HTTP_REFERER'],
# },
# current_app=self.name)
#
# def __get_mapping(self, form):
# mapping = {}
# for i, f in enumerate(form._fields):
# field_name = form.cleaned_data['fld_%s' % i]
# column = form.cleaned_data['col_%s' % i]
# rex = form.cleaned_data['rex_%s' % i]
# lk = form.cleaned_data['lkf_%s' % i]
# key = form.cleaned_data['key_%s' % i]
#
# if column >= 0:
# Field, _, _, _ = form._model._meta.get_field_by_name(field_name)
# mapping[field_name] = [column, rex, bool(key), lk, Field]
# return mapping
#
# def _step_1(self, request, app=None, model=None, temp_file_name=None):
# context = self._get_base_context(request, app, model)
# if request.method == 'POST':
# form = ImportForm(app, model, request.POST, request.FILES)
# if form.is_valid():
# f = request.FILES['csv']
# fd = open(temp_file_name, 'wb')
# for chunk in f.chunks():
# fd.write(chunk)
# fd.close()
# if settings.DEBUG:
# messages.info(request, temp_file_name)
# app_name, model_name = form.cleaned_data['model'].split(':')
# goto_url = reverse('%s:model_import_csv' % self.name,
# kwargs={'app': app_name, 'model': model_name, 'page': 2})
# return HttpResponseRedirect(goto_url)
# else:
# form = ImportForm(app, model, initial={'page': 1})
#
# context.update({'page': 1, 'form': form, })
#
# return render_to_response(self.template_step1 or [
# "iadmin/%s/%s/import_csv_1.html" % (app, model),
# "iadmin/%s/import_csv_1.html" % app,
# "iadmin/import_csv_1.html"
# ], context)
#
#
# def _process_row(self, row, mapping):
# record = {}
# key = {}
# for field_name, (col, rex, is_key, lk, Field) in mapping.items():
# try:
# raw_val = rex.search(row[col]).group(1)
# field_value = None
# if isinstance(Field, ForeignKey):
# try:
# field_value = Field.rel.to.objects.get(**{lk: raw_val})
# except Field.rel.to.DoesNotExist:
# pass
# else:
# field_value = Field.to_python(raw_val)
# record[field_name] = field_value
# if is_key:
# key[field_name] = record[field_name]
# except AttributeError, e:
# raise AttributeError('Error processing "%s": Invalid regex' % field_name)
# except Exception, e:
# raise e.__class__('Error processing "%s"' % field_name, e)
# return record, key
#
# def _step_2(self, request, app_name=None, model_name=None, temp_file_name=None):
# records = []
# context = self._get_base_context(request, app_name, model_name)
# try:
# Form = csv_processor_factory(app_name, model_name, temp_file_name)
# if request.method == 'POST':
# form = Form(request.POST, request.FILES)
# if form.is_valid():
# mapping = self.__get_mapping(form)
# Model = get_model(app_name, model_name)
# with open_csv(temp_file_name) as csv:
# if form.cleaned_data['header']:
# csv.next()
# for i, row in enumerate(csv):
# if i > 20 and not form.cleaned_data['preview_all']:
# break
# try:
# sample = Model()
# record, key = self._process_row(row, mapping)
# exists = key and Model.objects.filter(**key).exists() or False
# if key and exists:
# sample = Model.objects.get(**key)
# else:
# sample = Model()
# sample = update_model(request, sample, record, mapping)
# records.append([sample, None, row])
# except (ValidationError, AttributeError), e:
# records.append([sample, str(e), row])
# except (ValueError, ObjectDoesNotExist, ValidationError), e:
# #messages.error(request, '%s' % e)
# records.append([sample, str(e)])
# return self._step_3(request, app_name, model_name, temp_file_name, {'records': records,
# 'form': form})
#
# else:
# form = Form()
#
# context.update({'page': 2,
# 'form': form,
# 'back': reverse('%s:model_import_csv' % self.name,
# kwargs={'app': app_name, 'model': model_name, 'page': 1}),
# 'fields': form._fields,
# 'sample': form._head(),
# })
# return render_to_response(self.template_step2 or [
# "iadmin/%s/%s/import_csv_2.html" % (app_name, model_name.lower()),
# "iadmin/%s/import_csv_2.html" % app_name,
# "iadmin/import_csv_2.html"
# ], context)
#
# except IOError, e:
# messages.error(request, str(e))
# return redirect('%s:model_import_csv' % self.name, app=app_name, model=model_name, page=1)
#
# def _step_3(self, request, app_name=None, model_name=None, temp_file_name=None, extra_context=None ):
# context = self._get_base_context(request, app_name, model_name)
# Model = get_model(app_name, model_name)
# extra_context = extra_context or {}
# if 'apply' in request.POST:
# Form = csv_processor_factory(app_name, model_name, temp_file_name)
# form = Form(request.POST, request.FILES)
# if form.is_valid():
# mapping = self.__get_mapping(form)
# Model = get_model(app_name, model_name)
# with open_csv(temp_file_name) as csv:
# if form.cleaned_data['header']:
# csv.next()
# for i, row in enumerate(csv):
# record, key = self._process_row(row, mapping)
# try:
# if key:
# if form.cleaned_data['create_missing']:
# sample, _ = Model.objects.get_or_create(**key)
# else:
# sample = Model.objects.get(**key)
# else:
# sample = Model()
# sample = update_model(request, sample, record, mapping)
# sample.save()
# except (IntegrityError, ObjectDoesNotExist), e:
# messages.error(request, '%s: %s' % (str(e), row) )
# return redirect('%s:%s_%s_changelist' % (self.name, app_name, model_name.lower()))
# else:
# pass
# context.update({'page': 3,
# 'fields': Model._meta.fields,
# 'back': reverse('%s:model_import_csv' % self.name,
# kwargs={'app': app_name, 'model': model_name, 'page': 2}),
# 'lbl_next': 'Apply',
# })
# context.update(extra_context)
# return render_to_response(self.template_step3 or [
# "iadmin/%s/%s/import_csv_3.html" % (app_name, model_name.lower()),
# "iadmin/%s/import_csv_3.html" % app_name,
# "iadmin/import_csv_3.html"
# ], context)
#
#
# def import_csv(self, request, page=1, app=None, model=None):
# temp_file_name = os.path.join(tempfile.gettempdir(), 'iadmin_import_%s_%s.temp~' % (
# request.user.username, hash(request.user.password)))
# if int(page) == 1:
# return self._step_1(request, app, model, temp_file_name=temp_file_name)
# elif int(page) == 2:
# if not 'HTTP_REFERER' in request.META:
# return redirect('%s:model_import_csv' % self.name, app=app, model=model, page=1)
# # todo: check referer
# return self._step_2(request, app, model, temp_file_name=temp_file_name)
# elif int(page) == 3:
# return self._step_3(request, app, model, temp_file_name=temp_file_name)
# raise Exception(page)
#
# def get_urls(self):
# def wrap(view, cacheable=False):
# def wrapper(*args, **kwargs):
# return self.admin_site.admin_view(view, cacheable)(*args, **kwargs)
#
# return update_wrapper(wrapper, view)
#
# return patterns('',
# url(r'^import/1$',
# wrap(self.import_csv),
# name='import_csv'),
#
# url(r'^(?P<app>\w+)/(?P<model>\w+)/import/(?P<page>\d)',
# wrap(self.import_csv),
# name='model_import_csv'),
#
# url(r'^(?P<app>\w+)/import/(?P<page>\d)',
# wrap(self.import_csv),
# name='app_import_csv'),
## )
|
# Generated by Django 2.2.19 on 2021-05-31 00:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean 'protected' flag (default False) to bilbyui Label."""
    dependencies = [
        ('bilbyui', '0013_auto_20210321_2322'),
    ]
    operations = [
        migrations.AddField(
            model_name='label',
            name='protected',
            field=models.BooleanField(default=False),
        ),
    ]
|
from easyauth.router import EasyAuthAPIRouter
# Sub-router mounted under /finance; routes below inherit the prefix and tag.
finance_router = EasyAuthAPIRouter.create(prefix="/finance", tags=["finance"])
@finance_router.get("/")
async def finance_root():
    """Root endpoint of the finance router."""
    # fixed typo in the response payload: was "fiance_root"
    return "finance_root"
@finance_router.get("/data")
async def finance_data():
    """Return the finance data placeholder payload."""
    return "finance_data"
# import-time side effect: signals that the finance routes were registered
print("finance setup")
|
from multiprocessing import Process
import RPi.GPIO as GPIO
import time
import subprocess
import psutil
# Hardware setup: BCM pin numbering, LED as output (initially off), button
# as input with the internal pull-up enabled (so a pressed button reads LOW).
GPIO.setmode(GPIO.BCM)
ledPin = 17
buttonPin = 18
GPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(ledPin, GPIO.OUT)
GPIO.output(ledPin, GPIO.LOW)
def buttonCheck():
    """Poll the kill-switch button; on press, terminate every SSH session
    that arrived through the loopback interface."""
    while True:
        # consistency fix: read the named buttonPin constant (was literal 18);
        # pull-up wiring means a pressed button reads LOW (falsy)
        input_state = GPIO.input(buttonPin)
        if not input_state:
            print('button pressed')
            # get the list of pids for SSH sessions on loopback and kill them
            users = psutil.users()
            for user in users:
                if '127.0.0.1' in str(user.host):
                    subprocess.run(['kill', '-9', str(user.pid)])
        time.sleep(0.25)
def sshCheck():
    """Drive the indicator LED: lit while at least one SSH session over
    the loopback interface is active."""
    while True:
        session_active = any('127.0.0.1' in str(entry.host) for entry in psutil.users())
        GPIO.output(ledPin, GPIO.HIGH if session_active else GPIO.LOW)
        time.sleep(0.25)
if __name__ == '__main__':
    # run the button poller and the LED status loop as independent processes
    Process(target=buttonCheck).start()
    Process(target=sshCheck).start()
|
from NodeBox import NodeBox
from socket import *
class Node:
    '''handles all node operations

    Owns the listening socket and the NodeBox message store, and serves the
    message-exchange protocol to connecting clients: messages are separated
    by "$$$$" and the whole payload is terminated by "####"; the first
    incoming message is always the client's public key.
    '''
    def __init__(self):
        self.nodebox = NodeBox()
        self.socket = self.setup_socket()
        self.version = "0.0.1"
    def setup_socket(self, server_address = "127.0.0.1", server_port = 13337):
        '''returns socket that will be used for communication with clients'''
        # store server communication info in tuple for later
        server_info = (server_address, server_port)
        # create new socket, using ipv4 type address, tcp protocol
        welcome_socket = socket(AF_INET, SOCK_STREAM)
        # bind the socket to the given info (basically just setting port)
        welcome_socket.bind(server_info)
        # listen for incoming connections on the socket
        welcome_socket.listen(1)
        return welcome_socket
    def socket_message_monitor(self):
        '''monitor a given socket for messages'''
        # loops infinitely while the server is running
        while True:
            # block until a client connects
            communication_socket, client_address = self.socket.accept()
            print("\n\t--------client connected--------")
            incoming_data = ""
            while True:
                # receive data stream
                data_catch = communication_socket.recv(2048).decode()
                # fixed: recv() returning "" means the peer disconnected;
                # without this check a dropped client spun this loop forever
                if not data_catch:
                    break
                incoming_data += data_catch
                # "####" marks the end of the client's payload
                if incoming_data[-4:] == "####":
                    break
            # split into messages; the trailing piece is the "####" terminator
            incoming_messages = incoming_data.split("$$$$")[:-1]
            if not incoming_messages:
                # fixed: client vanished before sending its public key --
                # previously pop(0) below raised IndexError
                communication_socket.close()
                print("\n\t-------client disconnected-------\n")
                continue
            # first message is the requesting user's public key; collect the
            # messages stored for that key
            collected_messages = self.nodebox.collect_messages(public_key = incoming_messages.pop(0))
            print("\n\treceived {} messages from client".format(len(incoming_messages)))
            # join() keeps reply construction linear (the += loop was quadratic)
            outgoing_messages = "".join(m + "$$$$" for m in collected_messages)
            # encode into outgoing data, add end indicator
            outgoing_data = (outgoing_messages + "####").encode()
            communication_socket.send(outgoing_data)
            print("\n\tdelivered {} messages to client".format(len(collected_messages)))
            # finish, close socket
            communication_socket.close()
            print("\n\t-------client disconnected-------\n")
            # move all received messages to the NodeBox store
            for message in incoming_messages:
                self.nodebox.add_message(message)
            # finished embrace with user, wait for next one
    def __getstate__(self):
        '''helper method that allows this class to be pickled'''
        # the live socket cannot be pickled: persist only data + version
        # ref: https://stackoverflow.com/a/41754104
        return {
            'nodebox' : self.nodebox,
            'version' : self.version
        }
    def __setstate__(self, pickled_self):
        '''helper method that allows this class to be unpickled'''
        self.nodebox = pickled_self['nodebox']
        self.socket = self.setup_socket()  # recreate the unpicklable socket
        self.version = pickled_self['version']
if __name__ == '__main__':
    # announce self running
    print("\n\t Pollen Node Online ")
    # potentially load pickled version of old node later
    # node_instance = Node()
    # pickle testing (just for testing which types have pickle issues)
    import pickler as pr
    var_name = 'node_instance'
    og_init = " = " + "Node()"
    # HACK/SECURITY: exec/eval on assembled strings -- safe only because the
    # strings are hard-coded here; plain assignment would be simpler and safer
    if pr.is_pickled(var_name):
        exec(var_name + " = pr.get_pickled(var_name)")
    else:
        exec(var_name + og_init)
        pr.pickle_it(var_name, eval(var_name))
    # start monitoring socket indefinitely
    node_instance.socket_message_monitor()
"""Module for SIA Base Entity."""
from __future__ import annotations
from abc import abstractmethod
from dataclasses import dataclass
import logging
from pysiaalarm import SIAEvent
from homeassistant.core import CALLBACK_TYPE, State, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo, EntityDescription
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import StateType
from .const import AVAILABILITY_EVENT_CODE, DOMAIN, SIA_EVENT, SIA_HUB_ZONE
from .utils import get_attr_from_sia_event, get_unavailability_interval
_LOGGER = logging.getLogger(__name__)
@dataclass
class SIARequiredKeysMixin:
    """Required keys for SIA entities."""
    # maps SIA event codes to the state/availability consequence they imply
    code_consequences: dict[str, StateType | bool]
@dataclass
class SIAEntityDescription(EntityDescription, SIARequiredKeysMixin):
    """Entity Description for SIA entities (EntityDescription plus the required SIA keys)."""
class SIABaseEntity(RestoreEntity):
"""Base class for SIA entities."""
entity_description: SIAEntityDescription
    def __init__(
        self,
        port: int,
        account: str,
        zone: int | None,
        ping_interval: int,
        entity_description: SIAEntityDescription,
        unique_id: str,
        name: str,
    ) -> None:
        """Create SIABaseEntity object.

        port/account identify the SIA hub connection; zone is the alarm
        zone (None presumably for hub-level entities -- confirm);
        ping_interval feeds get_unavailability_interval to compute the
        availability timeout.
        """
        self.port = port
        self.account = account
        self.zone = zone
        self.ping_interval = ping_interval
        self.entity_description = entity_description
        self._attr_unique_id = unique_id
        self._attr_name = name
        # attach the entity to the hub device identified by "port_account"
        self._attr_device_info = DeviceInfo(
            name=name,
            identifiers={(DOMAIN, unique_id)},
            via_device=(DOMAIN, f"{port}_{account}"),
        )
        # handle used to cancel the pending "mark unavailable" timer
        self._cancel_availability_cb: CALLBACK_TYPE | None = None
        self._attr_extra_state_attributes = {}
        self._attr_should_poll = False  # state is pushed via the dispatcher
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
Overridden from Entity.
1. register the dispatcher and add the callback to on_remove
2. get previous state from storage and pass to entity specific function
3. if available: create availability cb
"""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIA_EVENT.format(self.port, self.account),
self.async_handle_event,
)
)
self.handle_last_state(await self.async_get_last_state())
if self._attr_available:
self.async_create_availability_cb()
@abstractmethod
def handle_last_state(self, last_state: State | None) -> None:
"""Handle the last state."""
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
Overridden from Entity.
"""
if self._cancel_availability_cb:
self._cancel_availability_cb()
@callback
def async_handle_event(self, sia_event: SIAEvent) -> None:
"""Listen to dispatcher events for this port and account and update state and attributes."""
_LOGGER.debug("Received event: %s", sia_event)
if int(sia_event.ri) not in (self.zone, SIA_HUB_ZONE):
return
self._attr_extra_state_attributes.update(get_attr_from_sia_event(sia_event))
state_changed = self.update_state(sia_event)
if state_changed or sia_event.code == AVAILABILITY_EVENT_CODE:
self.async_reset_availability_cb()
self.async_write_ha_state()
@abstractmethod
def update_state(self, sia_event: SIAEvent) -> bool:
"""Do the entity specific state updates."""
@callback
def async_reset_availability_cb(self) -> None:
"""Reset availability cb by cancelling the current and creating a new one."""
self._attr_available = True
if self._cancel_availability_cb:
self._cancel_availability_cb()
self.async_create_availability_cb()
def async_create_availability_cb(self) -> None:
"""Create a availability cb and return the callback."""
self._cancel_availability_cb = async_call_later(
self.hass,
get_unavailability_interval(self.ping_interval),
self.async_set_unavailable,
)
@callback
def async_set_unavailable(self, _) -> None:
"""Set unavailable."""
self._attr_available = False
self.async_write_ha_state()
|
import datetime
from django.core.management.base import BaseCommand, CommandError
from apps.wallet.utils import check_sync_state, sync_to_blockchain
class Command(BaseCommand):
    """Management command that pushes pending wallet state to the blockchain."""

    help = "Syncs the database with the blockchain"

    def handle(self, *args, **options):
        """Entry point: run a real (non-dry-run) blockchain sync."""
        sync_to_blockchain(is_dry_run=False)
        # check_sync_state()
|
def sample_list(lst, iter_):
    """For each index tuple in iter_, return the matching elements of lst.

    Produces a list of lists: one inner list per index tuple.
    """
    pick = lst.__getitem__
    return [list(map(pick, indices)) for indices in iter_]
def iter_g_2D(n):
    """Yield all index pairs (i, j) with 0 <= i < j < n (each pair once,
    excluding flips), in lexicographic order.

    Equivalent to itertools.combinations(range(n), 2); the stdlib version
    replaces the original hand-rolled nested loops and emits the same
    tuples in the same order.
    """
    from itertools import combinations

    # Kept from the original API: invalid n raises AssertionError lazily,
    # on first iteration (this is a generator function).
    assert n >= 2
    yield from combinations(range(n), 2)
def iter_gg_3D(n):
    """Yield all index triples (i, j, k) with 0 <= i < j < k < n, in
    lexicographic order.

    Equivalent to itertools.combinations(range(n), 3); the stdlib version
    replaces the original hand-rolled triple loop and emits the same tuples
    in the same order.
    """
    from itertools import combinations

    # Kept from the original API: invalid n raises AssertionError lazily,
    # on first iteration (this is a generator function).
    assert n >= 3
    yield from combinations(range(n), 3)
def iter_gn_3D(n):
    """For every ordered pair i < j, yield (i, j, k) for each k distinct
    from both; k runs over the full range for each pair."""
    assert n >= 3
    for i in range(n):
        for j in range(i + 1, n):
            for k in range(n):
                if k == i or k == j:
                    continue  # skip the two indices already in the pair
                yield i, j, k
def iter_ggn_4D(n):
    """For every increasing triple i < j < k, yield (i, j, k, m) for each m
    distinct from all three; m runs over the full range per triple."""
    assert n >= 4
    for i in range(n):
        for j in range(i + 1, n):
            for k in range(j + 1, n):
                for m in range(n):
                    if m in (i, j, k):
                        continue  # m must not repeat an index of the triple
                    yield i, j, k, m
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""Test your Trellis M4 Express without needing the serial output.
Press any button and the rest will light up the same color!"""
import time
from rainbowio import colorwheel
import adafruit_trellism4

trellis = adafruit_trellism4.TrellisM4Express()


def fill_rainbow():
    """Color every key by its linear index so the 8x4 pad shows a rainbow.

    Extracted because the original script duplicated this double loop
    verbatim (once at startup, once per key release).
    """
    for x in range(trellis.pixels.width):
        for y in range(trellis.pixels.height):
            # Spread the 0-255 color wheel evenly across the 32 keys.
            pixel_index = ((y * 8) + x) * 256 // 32
            trellis.pixels[x, y] = colorwheel(pixel_index & 255)


fill_rainbow()

current_press = set()

while True:
    pressed = set(trellis.pressed_keys)
    # Newly pressed keys: flood the whole pad with that key's rainbow color.
    for press in pressed - current_press:
        if press:
            print("Pressed:", press)
            pixel = (press[1] * 8) + press[0]
            pixel_index = pixel * 256 // 32
            trellis.pixels.fill(colorwheel(pixel_index & 255))
    # Newly released keys: restore the rainbow pattern.
    for release in current_press - pressed:
        if release:
            print("Released:", release)
            fill_rainbow()
    time.sleep(0.08)
    current_press = pressed
|
# ABC136d
import sys
import re
import math
sys.setrecursionlimit(10**6)  # kept from the original script; nothing here recurses


def solve(s):
    """AtCoder ABC136 D: return the per-square child counts after 10**100 moves.

    Each maximal "R...RL...L" run in s is independent: after an astronomically
    large even number of steps every child in a run ends up oscillating between
    the last 'R' square and the first 'L' square of that run. A trailing
    newline in s is harmless (it can never be part of an R+L+ match).

    Changes vs. the original flat script: the dead no-op ``j += 1`` inside the
    loop was removed, the builtin ``input`` is no longer shadowed, and the
    stdin/print driver moved under a main guard so the module is importable.
    """
    ans = []
    for run in re.findall("R+L+", s):
        r_cnt = run.count("R")
        l_cnt = run.count("L")
        # Counts finishing on the first 'L' square / the last 'R' square:
        # children split by the parity of their distance to the boundary.
        on_first_l = math.ceil((r_cnt - 1) / 2) + math.ceil(l_cnt / 2)
        on_last_r = math.ceil(r_cnt / 2) + math.ceil((l_cnt - 1) / 2)
        for j in range(len(run)):
            # run always ends in 'L', so run[j] == 'R' guarantees j + 1 is valid.
            if run[j] == "R" and run[j + 1] == "L":
                ans.append(on_last_r)   # the last 'R' square of the run
                ans.append(on_first_l)  # the first 'L' square of the run
            elif not (run[j] == "L" and run[j - 1] == "R"):
                ans.append(0)           # every other square empties out
    return ans


if __name__ == "__main__":
    print(*solve(sys.stdin.readline()))
|
# ################ Author Details ################
# Name: Kesava Swamy Karedla ; Kiran Vedula
# Email: kesava-swamy.karedla@broadcom.com ; kiran-kumar.vedula@broadcom.com
# ################################################
#
# 1. test_ft_static_nat - Verify static NAT establishes a one-to-one mapping between the inside local address and an
# inside global address.
# 2. test_ft_static_nat_snat - Verify static NAT establishes a one-to-one mapping between the inside local address and
# an inside global address with nat type as snat
# 3. test_ft_static_napt - Verify static NAPT functionality for TCP traffic
# 4. test_ft_static_napt_snat - Verify static NAPT functionality for UDP traffic with nat type as snat
# 5. test_ft_static_napt_entry_remove_reapply - Verify static NAPT functionality after NAT entries are removed
# and re applied
# 6. test_ft_static_napt_same_zone - Verify that if zones are same traffic should get forwarded as per L3 table
# 7. test_ft_static_twicenat - Verify static twicenat functionality
# 8. test_ft_dynamic_napt_without_acl_bind_udp - Verify dynamic NAT establishes a mapping between an inside local
# address and an inside global address dynamically selected from a pool of global addresses. Also verifies udp entry
# time out.
# 9. test_ft_nat_docker_restart - Verify nat translation table after nat docker restart
# 10. test_ft_dynamic_nat - Verify basic dynamic nat translation
#
###################################################
import pytest
from socket import inet_aton
from binascii import hexlify
from spytest import st, tgapi, SpyTestDict
import apis.routing.ip as ipapi
import apis.routing.nat as natapi
import apis.routing.arp as arpapi
import apis.switching.vlan as vlanapi
import apis.system.basic as basicapi
import apis.system.interface as intfapi
from utilities.parallel import exec_all, ensure_no_exception
# Suite-wide configuration/state container, populated by
# nat_initialize_variables() during module setup.
data = SpyTestDict()
# Routed-interface MAC of DUT1; None until set — presumably filled in by the
# module prolog elsewhere in this file (not assigned in this chunk).
dut1_rt_int_mac = None
def nat_initialize_variables():
    """Populate the module-level SpyTestDict ``data`` with every address,
    port, pool, zone, ACL and wait-timer constant used by the NAT tests."""
    # ---- Inside network 1: DUT interface address plus simulated hosts ----
    data.in1_ip_addr = "12.12.0.1"
    data.in1_ip_addr_h = ["12.12.0.2", "12.12.0.3", "12.12.0.4","12.12.0.5", "12.12.0.6", "12.12.0.7","12.12.0.8",
                          "12.12.0.9", "12.12.0.10", "12.12.0.11"]
    data.in1_ip_addr_rt = "12.12.0.0"
    data.in1_ip_addr_mask = "16"
    # ---- Inside network 2 ----
    data.in2_ip_addr = "13.13.13.1"
    data.in2_ip_addr_h = ["13.13.13.2", "13.13.13.3", "13.13.13.4"]
    data.in2_ip_addr_rt = "13.13.13.0"
    data.in2_ip_addr_mask = "16"
    # ---- Outside (global) network: static addresses and dynamic pool range ----
    data.out_ip_addr = "125.56.90.11"
    data.out_ip_addr_l = ["125.56.90.12", "125.56.90.13", "125.56.90.14", "125.56.90.15"]
    data.out_ip_addr_h = "125.56.90.1"
    data.out_ip_range = "125.56.90.23-125.56.90.24"
    data.out_ip_pool = ["125.56.90.23", "125.56.90.24"]
    data.out_ip_addr_rt = "125.56.90.0"
    data.out_ip_addr_mask = "24"
    data.global_ip_addr_h = "129.2.30.13"
    data.global_ip_addr = "129.2.30.12"
    data.global_ip_addr_rt = "129.2.30.0"
    data.global_ip_addr_mask = "24"
    # ---- Twice-NAT addressing ----
    data.tw_global_ip_addr = "99.99.99.1"
    data.tw_global_ip_addr_rt = "99.99.99.0"
    data.tw_global_ip_addr_mask = "24"
    data.test_ip_addr = "22.22.22.1"
    data.test_ip_addr_mask = "16"
    data.test_ip_addr_rt = "22.22.0.0"
    data.tw_test_ip_addr = "15.15.0.1"
    data.tw_test_ip_addr_mask = "16"
    data.tw_test_ip_addr_rt = "15.15.0.0"
    # ---- Static NAT (snat type) local/global pair ----
    data.s_local_ip = "11.11.11.2"
    data.s_local_ip_route = "11.11.0.0"
    data.s_local_ip_mask = "16"
    data.s_global_ip = "88.98.128.2"
    data.s_global_ip_rt = "88.98.128.0"
    data.s_global_ip_mask = "24"
    # ---- Protocol keywords, NAT zones, pool/binding names ----
    data.proto_all = "all"
    data.proto_tcp = "tcp"
    data.proto_udp = "udp"
    data.zone_1 = "0"
    data.zone_2 = "1"
    data.zone_3 = "2"
    data.zone_4 = "3"
    data.pool_name = ["pool_123_nat", "88912_pool", "123Pool"]
    data.bind_name = ["bind_1", "7812_bind", "bind11"]
    # ---- L4 ports used by the NAPT entries and traffic streams ----
    data.global_port_range = "333-334"
    data.local_src_port = ["251", "252"]
    data.local_dst_port = ["444", "8991"]
    data.global_src_port = ["12001", "7781"]
    data.global_dst_port = ["333", "334"]
    data.tcp_src_local_port = 1002
    data.tcp_dst_local_port = 3345
    data.udp_src_local_port = 7781
    data.udp_dst_local_port = 8812
    data.tcp_src_global_port = 100
    data.tcp_dst_global_port = 345
    data.udp_src_global_port = 7811
    data.udp_dst_global_port = 5516
    data.af_ipv4 = "ipv4"
    data.nat_type_snat = "snat"
    data.nat_type_dnat = "dnat"
    data.shell_sonic = "sonic"
    data.shell_vtysh = "vtysh"
    # ---- Traffic-generator rates and ACL parameters ----
    data.rate_traffic = '10'
    data.pkt_count = '10'
    data.host_mask = '32'
    data.packet_forward_action = 'FORWARD'
    data.packet_do_not_nat_action = 'DO_NOT_NAT'
    data.packet_drop_action = 'DROP'
    data.stage_Ing = 'INGRESS'
    data.stage_Egr = 'EGRESS'
    data.acl_table_nat = 'NAT_ACL'
    data.acl_table_in_nat_eg = 'in_nat_eg'
    data.acl_table_out_nat_eg = 'out_nat_eg'
    # NOTE(review): duplicate assignment — acl_table_nat was already set above.
    data.acl_table_nat = 'NAT_ACL'
    data.type = 'L3'
    data.acl_drop_all_rule = 'INGRESS_FORWARD_L3_DROP_ALL_RULE'
    data.ipv4_type = 'ipv4any'
    data.tg1_src_mac_addr = '00:00:23:11:14:08'
    data.tg2_src_mac_addr = '00:00:43:32:1A:01'
    # ---- Wait timers (seconds) ----
    data.wait_time_traffic_run_to_pkt_cap = 1
    data.wait_time_traffic_run = 1
    data.wait_nat_udp_timeout = 60
    data.wait_nat_stats = 7
    data.config_add='add'
    data.config_del='del'
    data.twice_nat_id_1 = '100'
    data.twice_nat_id_2 = '1100'
    data.wait_time_after_docker_restart = 10
    data.mask = '32'
    # Use klish CLI when supported; on click-only builds fall back to
    # validating translations via packet capture as well as CLI output.
    if not st.is_feature_supported("klish"):
        data.natcli_type = "click"
        data.nat_pkt_cap_enable = True
    else:
        data.natcli_type = "klish"
        data.nat_pkt_cap_enable = False
    if st.is_vsonic():
        # Virtual SONiC is slower: always capture packets and give the
        # hardware counters extra time to populate.
        data.nat_pkt_cap_enable = True
        data.wait_nat_stats = data.wait_nat_stats + 10
@pytest.fixture(scope="module", autouse=True)
def nat_module_config(request):
    """Module-scoped autouse fixture: build the shared ``data`` set and apply
    the NAT topology/configuration once, tearing it down after the last test."""
    nat_initialize_variables()
    nat_prolog()
    yield
    nat_epilog()
@pytest.fixture(scope="function", autouse=True)
def nat_func_hooks(request):
    """Per-test autouse fixture: clear interface and NAT counters before each
    test; restore the UDP NAT timeout after the UDP-timeout test."""
    intfapi.clear_interface_counters(vars.D1)
    natapi.clear_nat(vars.D1, statistics=True)
    yield
    if st.get_func_name(request) == 'test_ft_dynamic_napt_without_acl_bind_udp':
        # That test shrinks the UDP timeout to 120s; put back the 300s value
        # the rest of the suite expects.
        natapi.config_nat_timeout(vars.D1, udp_timeout=300, config='set')
@pytest.mark.nat_regression
def test_ft_static_nat():
    # ################################################
    # Objective - Verify static NAT establishes a one-to-one mapping between the inside local address and an
    # inside global address.
    # #################################################
    # Run each direction once: TG1 drives the inside->outside (SNAT) stream,
    # TG2 the outside->inside (DNAT) stream.
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_nat_dnat_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_nat_dnat_data_str_id_1"])
    tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_nat_dnat_data_str_id_1"])
    tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_nat_dnat_data_str_id_1"])
    st.wait(data.wait_nat_stats)  # allow the NAT counters time to update
    # SNAT direction: statistics keyed on the inside local source address.
    nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[0])
    if not nat_stats:
        st.error("Received empty list, nat statistics not updated")
        util_nat_debug_fun()
        st.report_fail("snat_translation_failed_in_packet", data.in1_ip_addr_h[0], data.out_ip_addr_l[0])
    if not (int(nat_stats[0]['packets']) == (int(data.pkt_count))):
        util_nat_debug_fun()
        st.report_fail("snat_translation_failed_in_packet",data.in1_ip_addr_h[0],data.out_ip_addr_l[0])
    # DNAT direction: statistics keyed on the outside global destination address.
    nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, dst_ip=data.out_ip_addr_l[0])
    if not nat_stats:
        st.error("Received empty list, nat statistics are not updated")
        util_nat_debug_fun()
        st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[0], data.in1_ip_addr_h[0])
    if not (int(nat_stats[0]['packets']) == (int(data.pkt_count))):
        util_nat_debug_fun()
        st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[0], data.in1_ip_addr_h[0])
    if data.nat_pkt_cap_enable:
        st.log("Validation through Packet Capture")
        # Frame offset 26 = IPv4 source address: expect the translated global IP.
        s_ip_addr_h = util_ip_addr_to_hexa_conv(data.out_ip_addr_l[0])
        if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_st_nat_dnat_data_str_id_1"], offset_list=[26], value_list=[s_ip_addr_h]):
            util_nat_debug_fun()
            st.report_fail("snat_translation_failed_in_packet",data.in1_ip_addr_h[0],data.out_ip_addr_l[0])
        # Frame offset 30 = IPv4 destination address: expect the inside local IP.
        d_ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[0])
        if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_st_nat_dnat_data_str_id_1"], offset_list=[30], value_list=[d_ip_addr_h]):
            util_nat_debug_fun()
            st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[0], data.in1_ip_addr_h[0])
    st.report_pass("test_case_passed")
@pytest.mark.nat_regression
def test_ft_static_nat_snat():
    # ################################################
    # Objective - Verify static NAT establishes a one-to-one mapping between the inside local address and an
    # inside global address with nat type as snat
    # #################################################
    # Bidirectional traffic over the snat-type static entry
    # (s_global_ip <-> s_local_ip).
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_nat_snat_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_nat_snat_data_str_id_1"])
    tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_nat_snat_data_str_id_1"])
    tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_nat_snat_data_str_id_1"])
    st.wait(data.wait_nat_stats)  # allow the NAT counters time to update
    # For nat type snat the configured global address is the traffic source.
    result = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, src_ip=data.s_global_ip)
    if not result:
        st.error("Received empty list, nat statistics not updated")
        util_nat_debug_fun()
        st.report_fail("snat_translation_failed_in_packet", data.s_global_ip, data.s_local_ip)
    if not int(result[0]['packets']) == (int(data.pkt_count)):
        util_nat_debug_fun()
        st.report_fail("snat_translation_failed_in_packet", data.s_global_ip,data.s_local_ip)
    # Reverse direction: statistics keyed on the local destination address.
    nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, dst_ip=data.s_local_ip)
    if not nat_stats:
        st.error("Received empty list, nat statistics are not updated")
        util_nat_debug_fun()
        st.report_fail("dnat_translation_fail_in_packet", data.s_local_ip,data.s_global_ip)
    if not int(nat_stats[0]['packets']) == (int(data.pkt_count)):
        util_nat_debug_fun()
        st.report_fail("dnat_translation_fail_in_packet", data.s_local_ip,data.s_global_ip)
    if data.nat_pkt_cap_enable:
        st.log("Validation through Packet Capture")
        # Frame offsets 26/30 are the IPv4 source/destination address fields.
        s_ip_addr_h = util_ip_addr_to_hexa_conv(data.s_local_ip)
        if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_st_nat_snat_data_str_id_1"], offset_list=[26], value_list=[s_ip_addr_h]):
            util_nat_debug_fun()
            st.report_fail("snat_translation_failed_in_packet", data.s_global_ip, data.s_local_ip)
        d_ip_addr_h = util_ip_addr_to_hexa_conv(data.s_global_ip)
        if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_st_nat_snat_data_str_id_1"], offset_list=[30], value_list=[d_ip_addr_h]):
            util_nat_debug_fun()
            st.report_fail("dnat_translation_fail_in_packet", data.s_local_ip,data.s_global_ip)
    st.report_pass("test_case_passed")
@pytest.mark.nat_regression
def test_ft_static_napt():
    # ################################################
    # Objective - Verify static NAPT functionality for TCP traffic
    # #################################################
    # Fix vs. original: the DNAT empty-stats branch logged "acl counters not
    # updated" (copy-paste from an ACL test) although NAT statistics are what
    # is being checked; the message now matches the sibling tests.
    # Bidirectional TCP traffic over the static NAPT entry.
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"])
    tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"])
    tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"])
    st.wait(data.wait_nat_stats)  # allow the NAT counters time to update
    # SNAT direction: statistics keyed on the inside local IP + L4 port.
    result = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_tcp, src_ip=data.in1_ip_addr_h[1], src_ip_port= data.tcp_src_local_port)
    if not result:
        st.error("Received empty list, nat statistics not updated")
        util_nat_debug_fun()
        st.report_fail("snat_translation_failed_in_packet", data.in1_ip_addr_h[1], data.out_ip_addr_l[1])
    if not (int(result[0]['packets']) == (int(data.pkt_count))):
        util_nat_debug_fun()
        st.report_fail("snat_translation_failed_in_packet",data.in1_ip_addr_h[1],data.out_ip_addr_l[1])
    # DNAT direction: statistics keyed on the global IP + L4 port.
    nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_tcp, dst_ip=data.out_ip_addr_l[1], dst_ip_port=data.tcp_src_global_port)
    if not nat_stats:
        st.error("Received empty list, nat statistics not updated")
        util_nat_debug_fun()
        st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[1], data.in1_ip_addr_h[1])
    if not (int(nat_stats[0]['packets']) == (int(data.pkt_count))):
        util_nat_debug_fun()
        st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[1], data.in1_ip_addr_h[1])
    if data.nat_pkt_cap_enable:
        st.log("Validation through Packet Capture")
        # Offsets: 23 = IP protocol ('06' = TCP), 26 = source IP, 34 = TCP source port.
        s_ip_addr_h = util_ip_addr_to_hexa_conv(data.out_ip_addr_l[1])
        s_port_h = util_int_to_hexa_conv(data.tcp_src_global_port)
        if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"], offset_list=[23, 26, 34], value_list=['06', s_ip_addr_h, s_port_h]):
            util_nat_debug_fun()
            st.report_fail("snat_translation_failed_in_packet",data.in1_ip_addr_h[1],data.out_ip_addr_l[1])
        # Offsets: 30 = destination IP, 36 = TCP destination port.
        d_ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[1])
        d_port_h = util_int_to_hexa_conv(data.tcp_src_local_port)
        if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"], offset_list=[23, 30, 36], value_list=['06', d_ip_addr_h, d_port_h]):
            util_nat_debug_fun()
            st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[1], data.in1_ip_addr_h[1])
    st.report_pass("test_case_passed")
@pytest.mark.nat_regression
def test_ft_static_napt_snat():
    # ################################################
    # Objective - Verify static NAPT functionality for UDP traffic with nat type as snat
    # #################################################
    # Fixes vs. original: the failure reports referenced data.out_ip_addr_l[0]
    # although this test uses index 2 throughout, and the DNAT packet-count
    # failure branch skipped the util_nat_debug_fun() call every sibling makes.
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_napt_udp_dnat_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_napt_udp_dnat_data_str_id_1"])
    tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_napt_udp_dnat_data_str_id_1"])
    tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_napt_udp_dnat_data_str_id_1"])
    st.wait(data.wait_nat_stats)  # allow the NAT counters time to update
    # SNAT direction: statistics keyed on the inside local IP + UDP port.
    # NOTE(review): the report arguments below list global before local, the
    # reverse of the non-snat sibling tests — confirm the intended order.
    result = natapi.poll_for_nat_statistics(vars.D1, protocol=(data.proto_udp), src_ip=data.in1_ip_addr_h[2], src_ip_port=data.udp_src_local_port)
    if not result:
        st.error("Received empty list, nat statistics not updated")
        util_nat_debug_fun()
        st.report_fail("snat_translation_failed_in_packet", data.out_ip_addr_l[2], data.in1_ip_addr_h[2])
    if not int(result[0]['packets']) == (int(data.pkt_count)):
        util_nat_debug_fun()
        st.report_fail("snat_translation_failed_in_packet",data.out_ip_addr_l[2],data.in1_ip_addr_h[2])
    # DNAT direction: statistics keyed on the global IP + UDP port.
    nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, dst_ip=data.out_ip_addr_l[2], dst_ip_port=data.udp_src_global_port)
    if not nat_stats:
        st.error("Received empty list, nat statistics are not updated")
        util_nat_debug_fun()
        st.report_fail("dnat_translation_fail_in_packet", data.in1_ip_addr_h[2],data.out_ip_addr_l[2])
    if not int(nat_stats[0]['packets']) == (int(data.pkt_count)):
        util_nat_debug_fun()
        st.report_fail("dnat_translation_fail_in_packet", data.in1_ip_addr_h[2],data.out_ip_addr_l[2])
    if data.nat_pkt_cap_enable:
        st.log("Validation through Packet Capture")
        # Offsets: 23 = IP protocol ('11' = UDP), 26 = source IP, 34 = UDP source port.
        s_ip_addr_h = util_ip_addr_to_hexa_conv(data.out_ip_addr_l[2])
        s_port_h = util_int_to_hexa_conv(data.udp_src_global_port)
        if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_st_napt_udp_dnat_data_str_id_1"], offset_list=[23, 26, 34], value_list=['11', s_ip_addr_h, s_port_h]):
            util_nat_debug_fun()
            st.report_fail("snat_translation_failed_in_packet", data.out_ip_addr_l[2], data.in1_ip_addr_h[2])
        # Offsets: 30 = destination IP, 36 = UDP destination port.
        d_ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[2])
        d_port_h = util_int_to_hexa_conv(data.udp_src_local_port)
        if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_st_napt_udp_dnat_data_str_id_1"], offset_list=[23, 30, 36], value_list=['11', d_ip_addr_h, d_port_h]):
            util_nat_debug_fun()
            st.report_fail("dnat_translation_fail_in_packet", data.in1_ip_addr_h[2],data.out_ip_addr_l[2])
    st.report_pass("test_case_passed")
@pytest.mark.nat_regression
def test_ft_static_napt_entry_remove_reapply():
    # ################################################
    # Objective - Verify static NAPT functionality after NAT entries are removed and re applied
    # #################################################
    # Delete and immediately re-add the same static NAPT entry, then confirm
    # translation still works in both directions.
    natapi.config_nat_static(vars.D1, protocol=data.proto_tcp, global_ip=data.out_ip_addr_l[1],
                             local_ip=data.in1_ip_addr_h[1],
                             local_port_id=data.tcp_src_local_port, global_port_id=data.tcp_src_global_port,
                             config=data.config_del, nat_type=data.nat_type_dnat)
    natapi.config_nat_static(vars.D1, protocol=data.proto_tcp, global_ip=data.out_ip_addr_l[1],
                             local_ip=data.in1_ip_addr_h[1],
                             local_port_id=data.tcp_src_local_port, global_port_id=data.tcp_src_global_port,
                             config=data.config_add, nat_type=data.nat_type_dnat)
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"])
    tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"])
    tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"])
    st.wait(data.wait_nat_stats)  # allow the NAT counters time to update
    # SNAT direction after the re-apply.
    result = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_tcp, src_ip=data.in1_ip_addr_h[1], src_ip_port=data.tcp_src_local_port)
    if not result:
        st.error("Received empty list, nat statistics not updated")
        util_nat_debug_fun()
        st.report_fail("snat_translation_failed_in_packet", data.in1_ip_addr_h[1], data.out_ip_addr_l[1])
    if not (int(result[0]['packets']) == (int(data.pkt_count))):
        util_nat_debug_fun()
        st.report_fail("snat_translation_failed_in_packet",data.in1_ip_addr_h[1],data.out_ip_addr_l[1])
    # DNAT direction after the re-apply.
    nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_tcp, dst_ip=data.out_ip_addr_l[1], dst_ip_port=data.tcp_src_global_port)
    if not nat_stats:
        st.error("Received empty list, nat statistics not updated")
        util_nat_debug_fun()
        st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[1], data.in1_ip_addr_h[1])
    if not (int(nat_stats[0]['packets']) == (int(data.pkt_count))):
        util_nat_debug_fun()
        st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[1], data.in1_ip_addr_h[1])
    st.report_pass("test_case_passed")
@pytest.mark.nat_regression
def test_ft_static_napt_same_zone():
    # ################################################
    # Objective - Verify that if zones are same traffic should get forwarded as per L3 table.
    # #################################################
    # Temporarily put the second interface into the same zone as the first so
    # no translation should occur, run traffic, then restore the original zone.
    natapi.config_nat_interface(vars.D1, interface_name=vars.D1T1P2, zone_value=data.zone_1, config=data.config_add)
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_nat_dnat_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_nat_dnat_data_str_id_1"])
    tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_nat_dnat_data_str_id_1"])
    tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_nat_dnat_data_str_id_1"])
    natapi.config_nat_interface(vars.D1, interface_name=vars.D1T1P2, zone_value=data.zone_2, config=data.config_add)
    st.wait(data.wait_nat_stats)
    # The translation counters must NOT have advanced by the sent packet count.
    nat_stats = natapi.get_nat_statistics(vars.D1, protocol=data.proto_all, dst_ip=data.out_ip_addr_l[0])
    if not nat_stats:
        # NOTE(review): an empty stats list is treated as failure even though
        # "no translation" is the expected outcome — presumably the static
        # entry should still be present with zero counters; confirm intent.
        st.error("Received empty list, nat statistics are not updated")
        util_nat_debug_fun()
        st.report_fail("nat_translation_happening_in_no_nat_case")
    if int(nat_stats[0]['packets']) == (int(data.pkt_count)):
        util_nat_debug_fun()
        st.report_fail("nat_translation_happening_in_no_nat_case")
    st.report_pass("test_case_passed")
@pytest.mark.nat_regression
def test_ft_static_twicenat():
    # ################################################
    # Objective - Verify static twicenat functionality
    # #################################################
    # Twice NAT rewrites BOTH source and destination addresses; traffic uses
    # host index 3 of the inside/outside address sets.
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_nat_twicenat_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_nat_twicenat_data_str_id_1"])
    tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_nat_twicenat_data_str_id_1"])
    tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_nat_twicenat_data_str_id_1"])
    natapi.show_nat_translations(vars.D1)  # for the log; aids debugging
    # Forward direction: both translated source and destination must match.
    nat_stats = natapi.poll_for_twice_nat_statistics(vars.D1, protocol=data.proto_all, src_ip=data.tw_global_ip_addr, dst_ip=data.out_ip_addr_l[3])
    if not nat_stats:
        st.error("Received empty list, nat statistics not updated")
        util_nat_debug_fun()
        st.report_fail("twicenat_translation_failed_in_packet")
    if not int(nat_stats[0]['packets']) == (int(data.pkt_count)):
        util_nat_debug_fun()
        st.report_fail("twicenat_translation_failed_in_packet")
    # Reverse direction.
    result = natapi.poll_for_twice_nat_statistics(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[3], dst_ip=data.tw_test_ip_addr)
    if not result:
        st.error("Received empty list, nat statistics not updated")
        util_nat_debug_fun()
        st.report_fail("twicenat_translation_failed_in_packet")
    if not int(result[0]['packets']) == (int(data.pkt_count)):
        util_nat_debug_fun()
        st.report_fail("twicenat_translation_failed_in_packet")
    if data.nat_pkt_cap_enable:
        st.log("Validation through Packet Capture")
        # Frame offsets 26/30 are the IPv4 source/destination address fields.
        s_ip_addr_h = util_ip_addr_to_hexa_conv(data.out_ip_addr_l[3])
        d_ip_addr_h = util_ip_addr_to_hexa_conv(data.tw_global_ip_addr)
        if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_st_nat_twicenat_data_str_id_1"], offset_list=[26, 30], value_list=[s_ip_addr_h, d_ip_addr_h]):
            util_nat_debug_fun()
            # NOTE(review): the report args below use indexes 2/0 although this
            # test works with index 3 — looks like copy-paste; verify.
            st.report_fail("snat_translation_failed_in_packet", data.out_ip_addr_l[2], data.in1_ip_addr_h[2])
        s_ip_addr_h = util_ip_addr_to_hexa_conv(data.tw_test_ip_addr)
        d_ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[3])
        if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_st_nat_twicenat_data_str_id_1"], offset_list=[26, 30], value_list=[s_ip_addr_h, d_ip_addr_h]):
            util_nat_debug_fun()
            st.report_fail("dnat_translation_fail_in_packet", data.in1_ip_addr_h[2],data.out_ip_addr_l[0])
    st.report_pass("test_case_passed")
@pytest.mark.nat_regression
def test_ft_dynamic_napt_without_acl_bind_udp():
    # ################################################
    # Objective - Verify dynamic NAT establishes a mapping between an inside local address and an inside global address
    # dynamically selected from a pool of global addresses. Also verifies udp entry time out.
    # #################################################
    # Shorten the UDP entry timeout so the ageing check at the end is feasible;
    # the function-scoped fixture restores the 300s value afterwards.
    natapi.config_nat_timeout(vars.D1, udp_timeout=120, config='set')
    natapi.clear_nat(vars.D1, translations=True)
    st.log("Traffic for snat learning via dynamic pool")
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"])
    st.log("Traffic for data forwarding to check nat stats")
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"])
    # Look up which global IP/port the DUT dynamically allocated for the flow.
    trn_val = natapi.get_nat_translations(vars.D1, protocol=data.proto_udp, src_ip=data.in1_ip_addr_h[-1],src_ip_port=data.local_src_port[0])
    if not trn_val:
        st.error("Received empty list,nat translation table not updated")
        util_nat_debug_fun()
        st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0])
    trn_src_ip = trn_val[0]["trn_src_ip"]
    trn_src_port = trn_val[0]["trn_src_ip_port"]
    ip_addr_h = util_ip_addr_to_hexa_conv(trn_src_ip)
    port_h = util_int_to_hexa_conv(trn_src_port)
    st.log("Sending traffic for dnat verification")
    # Pick the TG2 stream whose destination matches the allocated translation.
    tg2_str_obj = tg2_str_selector(trn_src_ip, trn_src_port)
    tg2.tg_traffic_control(action='run', handle=tg2_str_obj)
    tg2.tg_traffic_control(action='stop', handle=tg2_str_obj)
    # Show command for debugging purpose in case of failures.
    intfapi.show_interface_counters_all(vars.D1)
    st.wait(data.wait_nat_stats)
    # SNAT direction: counters keyed on the inside local IP + UDP port.
    nat_stats_s = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp,
                                                 src_ip=data.in1_ip_addr_h[-1], src_ip_port=data.local_src_port[0])
    if not nat_stats_s:
        st.error("Received empty list,nat statistics are not updated")
        util_nat_debug_fun()
        st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0])
    if not int(nat_stats_s[0]['packets']) >= (int(data.pkt_count)):
        util_nat_debug_fun()
        st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0])
    # DNAT direction: counters keyed on the dynamically allocated global IP/port.
    nat_stats_d = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, dst_ip=trn_src_ip,dst_ip_port=trn_src_port)
    if not nat_stats_d:
        st.error("Received empty list, nat statistics are not updated")
        util_nat_debug_fun()
        st.report_fail("dynamic_dnat_translation_entry_create_fail",data.out_ip_pool[0],data.out_ip_pool[0])
    if not int(nat_stats_d[0]['packets']) >= (int(data.pkt_count)):
        util_nat_debug_fun()
        st.report_fail("dynamic_dnat_translation_entry_create_fail",data.out_ip_pool[0],data.out_ip_pool[0])
    st.log("Try deleting the pool")
    # Deleting a pool that is still referenced should be handled gracefully.
    if not natapi.config_nat_pool(vars.D1, pool_name=data.pool_name[0], global_ip_range=data.out_ip_range,
                                  global_port_range= data.global_port_range, config=data.config_del, skip_error=True):
        util_nat_debug_fun()
        st.report_fail("pool_del_fail",data.pool_name[0])
    if data.nat_pkt_cap_enable:
        st.log("Validation through Packet Capture")
        # Offsets: 23 = IP protocol ('11' = UDP), 26/34 = source IP/port.
        if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"],
                            offset_list=[23, 26, 34], value_list=['11', ip_addr_h, port_h]):
            util_nat_debug_fun()
            st.report_fail("snat_translation_failed_in_packet", data.in1_ip_addr_h[-1], data.out_ip_pool[0])
        ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[-1])
        port_h = util_int_to_hexa_conv(data.local_src_port[0])
        # Offsets: 30/36 = destination IP/port.
        if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg2_str_obj,
                            offset_list=[23, 30, 36], value_list=['11', ip_addr_h, port_h]):
            util_nat_debug_fun()
            st.report_fail("dnat_translation_fail_in_packet", data.out_ip_pool[0],data.in1_ip_addr_h[-1])
    st.log("NAT UDP entry timeout verification")
    st.log("Waiting for UDP timeout")
    # 120s configured timeout plus margin: the dynamic entry must have aged out.
    st.wait(180)
    trn_val = natapi.get_nat_translations(vars.D1, protocol=data.proto_udp, src_ip=data.in1_ip_addr_h[-1],
                                          src_ip_port=data.local_src_port[0])
    if trn_val:
        st.error("Received non-empty list,nat translation entry exists")
        util_nat_debug_fun()
        st.report_fail("nat_translation_table_not_cleared")
    st.report_pass("test_case_passed")
@pytest.mark.nat_regression
def test_ft_nat_docker_restart():
    # ################################################
    # Objective - Verify nat translation table after nat docker restart
    # #################################################
    st.log("Sending traffic for dynamic NAT snat case")
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"])
    # Bounce the NAT docker and give it time to stop/start.
    basicapi.docker_operation(vars.D1,"nat","stop")
    st.log("Wait for NAT docker to STOP")
    st.wait(data.wait_time_after_docker_restart)
    basicapi.docker_operation(vars.D1, "nat", "start")
    st.log("Wait for NAT docker to START")
    st.wait(data.wait_time_after_docker_restart)
    # Dynamically learned entries must NOT survive the restart...
    trn_val = natapi.get_nat_translations(vars.D1, protocol=data.proto_udp, src_ip=data.in1_ip_addr_h[-1],
                                          src_ip_port=data.local_src_port[0])
    if trn_val:
        util_nat_debug_fun()
        st.report_fail("dynamic_napt_entry_exists_after_docker_restart")
    # ...while configured static entries must be restored.
    trn_val_1 = natapi.get_nat_translations(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[0])
    if not trn_val_1:
        util_nat_debug_fun()
        st.report_fail("static_nat_entry_not_restored_after_docker_restart")
    st.report_pass("test_case_passed")
@pytest.mark.nat_regression
def test_ft_dynamic_nat():
    """Verify basic dynamic NAT translation through a pool configured without ports."""
    natapi.clear_nat(vars.D1, translations=True)
    # Swap the module's default pool/binding for a port-less pool for this test.
    st.log("Deleting NAT Pool binding")
    natapi.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[0], pool_name=data.pool_name[0],
                                   config=data.config_del)
    st.log("Creating NAT Pool-1 without port")
    natapi.config_nat_pool(vars.D1, pool_name=data.pool_name[1], global_ip_range=data.out_ip_pool[0],
                           config=data.config_add)
    st.log("Creating NAT Pool binding")
    natapi.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[1], pool_name=data.pool_name[1],
                                   config=data.config_add)
    st.log("Traffic for learning")
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"])
    tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"])
    tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"])
    st.log("Traffic for data forwarding to check nat stats")
    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"])
    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"])
    tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"])
    tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"])
    st.wait(data.wait_nat_stats)
    # Accumulate failures in a flag so the config restore below always runs.
    tc_fail_flag = 0
    result = natapi.get_nat_translations(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[-1])
    if not result:
        st.error("Dynamic NAT failed for SRC IP")
        tc_fail_flag = 1
    nat_stats_s = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[-1])
    if not nat_stats_s:
        tc_fail_flag = 1
    nat_stats_d = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, dst_ip=data.out_ip_pool[0])
    if not nat_stats_d:
        tc_fail_flag = 1
    if data.nat_pkt_cap_enable:
        st.log("Validation through Packet Capture")
        # Outbound: source IP in the captured frame (offset 26) must be the pool address.
        s_ip_addr_h = util_ip_addr_to_hexa_conv(data.out_ip_pool[0])
        if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"],
                            offset_list=[26], value_list=[s_ip_addr_h]):
            tc_fail_flag = 1
        # Inbound: destination IP (offset 30) must translate back to the inside host.
        d_ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[-1])
        if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"],
                            offset_list=[30], value_list=[d_ip_addr_h]):
            tc_fail_flag = 1
    # Restore the module's default pool and binding before reporting the verdict.
    natapi.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[1], pool_name=data.pool_name[1],
                                   config=data.config_del)
    natapi.config_nat_pool(vars.D1, pool_name=data.pool_name[0], global_ip_range=data.out_ip_range,
                           global_port_range=data.global_port_range, config=data.config_add)
    natapi.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[0], pool_name=data.pool_name[0],
                                   config=data.config_add)
    if tc_fail_flag:
        util_nat_debug_fun()
        st.report_fail("dynamic_nat_fail")
    st.report_pass("dynamic_nat_successful")
def nat_tg_config():
    """Initialize traffic-generator handles, routing interfaces and streams.

    Populates the module globals used by all test cases.
    """
    global tg_handler, tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data, tg_rt_int_handle
    tg_handler = util_tg_init(vars, [vars.T1D1P1, vars.T1D1P2])
    # NOTE(review): tg1 and tg2 are the same underlying TG object; only the
    # port handles (tg_ph_1/tg_ph_2) distinguish the two test ports.
    tg1 = tg_handler["tg"]
    tg2 = tg_handler["tg"]
    tg_ph_1 = tg_handler["tg_ph_1"]
    tg_ph_2 = tg_handler["tg_ph_2"]
    tg_rt_int_handle = util_tg_routing_int_config(vars, tg1, tg2, tg_ph_1, tg_ph_2)
    tg_str_data = util_tg_stream_config(tg1, tg2, tg_ph_1, tg_ph_2)
def nat_dut_config():
    """Apply base IP addressing, static routes and NAT configuration on the DUT."""
    global dut1_rt_int_mac
    ipapi.config_ip_addr_interface(vars.D1, vars.D1T1P1, data.in1_ip_addr, data.in1_ip_addr_mask, family=data.af_ipv4)
    # The outside-facing port carries one address per static NAT entry plus
    # the dynamic pool addresses.
    for i in range(0, len(data.out_ip_addr_l)):
        ipapi.config_ip_addr_interface(vars.D1, vars.D1T1P2, data.out_ip_addr_l[i], data.out_ip_addr_mask,
                                       family=data.af_ipv4)
    for i in range(0, len(data.out_ip_pool)):
        ipapi.config_ip_addr_interface(vars.D1, vars.D1T1P2, data.out_ip_pool[i], data.out_ip_addr_mask,
                                       family=data.af_ipv4)
    # Router MAC of the inside port; the TG streams use it as mac_dst.
    dut1_rt_int_mac = basicapi.get_ifconfig_ether(vars.D1, vars.D1T1P1)
    ipapi.create_static_route(vars.D1, data.out_ip_addr_h,
                              "{}/{}".format(data.global_ip_addr_rt, data.global_ip_addr_mask),
                              shell=data.shell_vtysh, family=data.af_ipv4)
    ipapi.create_static_route(vars.D1, data.in1_ip_addr_h[0],
                              "{}/{}".format(data.s_global_ip_rt, data.s_global_ip_mask))
    ipapi.create_static_route(vars.D1, data.out_ip_addr_h,
                              "{}/{}".format(data.tw_global_ip_addr_rt, data.tw_global_ip_addr_mask))
    st.log("NAT Configuration")
    natapi.config_nat_feature(vars.D1, 'enable')
    util_nat_zone_config(vars, [vars.D1T1P1, vars.D1T1P2], [data.zone_1, data.zone_2], config=data.config_add)
    # Static entries: basic dnat, tcp napt dnat, udp napt snat and a plain snat.
    natapi.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.out_ip_addr_l[0],
                             local_ip=data.in1_ip_addr_h[0], config=data.config_add, nat_type=data.nat_type_dnat)
    natapi.config_nat_static(vars.D1, protocol=data.proto_tcp, global_ip=data.out_ip_addr_l[1],
                             local_ip=data.in1_ip_addr_h[1],
                             local_port_id=data.tcp_src_local_port, global_port_id=data.tcp_src_global_port,
                             config=data.config_add, nat_type=data.nat_type_dnat)
    natapi.config_nat_static(vars.D1, protocol=data.proto_udp, global_ip=data.in1_ip_addr_h[2],
                             local_ip=data.out_ip_addr_l[2],
                             local_port_id=data.udp_src_global_port, global_port_id=data.udp_src_local_port,
                             config=data.config_add, nat_type=data.nat_type_snat)
    natapi.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.s_global_ip, local_ip=data.s_local_ip,
                             config=data.config_add, nat_type=data.nat_type_snat)
    # Twice-NAT: the next two entries share twice_nat_id_1 so both legs translate.
    natapi.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.out_ip_addr_l[3],
                             local_ip=data.in1_ip_addr_h[3],
                             config=data.config_add, nat_type=data.nat_type_dnat, twice_nat_id=data.twice_nat_id_1)
    natapi.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.tw_global_ip_addr,
                             local_ip=data.tw_test_ip_addr,
                             config=data.config_add, nat_type=data.nat_type_snat, twice_nat_id=data.twice_nat_id_1)
    natapi.show_nat_translations(vars.D1)
    # dynamic NAT config
    st.log("Creating NAT Pool-1")
    natapi.config_nat_pool(vars.D1, pool_name=data.pool_name[0], global_ip_range=data.out_ip_range,
                           global_port_range=data.global_port_range, config=data.config_add)
    st.log("Creating NAT Pool binding")
    natapi.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[0], pool_name=data.pool_name[0],
                                   config=data.config_add)
def nat_prolog():
    """Validate the topology, skip unsupported platforms and apply base config."""
    global vars
    vars = st.ensure_min_topology("D1T1:2")
    platform = basicapi.get_hwsku(vars.D1)
    common_constants = st.get_datastore(vars.D1, "constants", "default")
    # NAT is not available on TH3-based platforms; abort early.
    if platform.lower() in common_constants['TH3_PLATFORMS']:
        st.error("NAT is not supported for this platform {}".format(platform))
        st.report_unsupported('NAT_unsupported_platform', platform)
    # Configure TG and DUT in parallel; TG config runs on the main thread.
    [_, exceptions] = exec_all(True, [[nat_tg_config], [nat_dut_config]], first_on_main=True)
    ensure_no_exception(exceptions)
def nat_epilog():
    """Undo the NAT, IP, VLAN and TG configuration applied by this module."""
    vars = st.get_testbed_vars()
    util_nat_zone_config(vars, [vars.D1T1P1, vars.D1T1P2], [data.zone_1, data.zone_2], config=data.config_del)
    natapi.clear_nat_config(vars.D1)
    natapi.config_nat_feature(vars.D1, 'disable')
    ipapi.delete_static_route(vars.D1, data.out_ip_addr_h,
                              "{}/{}".format(data.global_ip_addr_rt, data.global_ip_addr_mask))
    ipapi.clear_ip_configuration(st.get_dut_names())
    vlanapi.clear_vlan_configuration(st.get_dut_names())
    if vars.config.module_epilog_tgen_cleanup:
        st.log("Clearing Routing interface config on TG ports")
        tg1.tg_interface_config(port_handle=tg_ph_1, handle=tg_rt_int_handle[0]['handle'], mode='destroy')
        tg1.tg_interface_config(port_handle=tg_ph_2, handle=tg_rt_int_handle[1]['handle'], mode='destroy')
        tgapi.traffic_action_control(tg_handler, actions=['reset'])
def util_nat_zone_config(vars, intf, zone, config):
    """Configure or remove NAT zone values on a list of interfaces.

    :param vars: testbed variables (vars.D1 is the DUT).
    :param intf: list of interface names.
    :param zone: list of zone values, positionally paired with ``intf``.
    :param config: ``data.config_add`` to apply; any other value removes.
    :return: True (kept for backward compatibility with existing callers).
    """
    if config == data.config_add:
        st.log("zone value configuration")
        action = data.config_add
    else:
        st.log("zone value un configuration")
        action = data.config_del
    # Pair each interface with its zone instead of indexing by position;
    # this also removes the duplicated loop bodies of the original.
    for interface_name, zone_value in zip(intf, zone):
        natapi.config_nat_interface(vars.D1, interface_name=interface_name,
                                    zone_value=zone_value, config=action)
    return True
def util_tg_init(vars, tg_port_list):
    """Return traffic-generator handles for the given port(s).

    Accepts a single port or a list/tuple of ports. The original only
    special-cased ``list``, so a tuple ended up wrapped as ``[tuple]``;
    tuples are now flattened the same way as lists.

    :param vars: testbed variables.
    :param tg_port_list: one TG port or a sequence of TG ports.
    :return: handler dict as returned by ``tgapi.get_handles``.
    """
    if isinstance(tg_port_list, (list, tuple)):
        tg_port_list = list(tg_port_list)
    else:
        tg_port_list = [tg_port_list]
    tg_handler = tgapi.get_handles(vars, tg_port_list)
    return tg_handler
def util_tg_routing_int_config(vars, tg1, tg2, tg_ph_1, tg_ph_2):
    """Create IPv4 host interfaces on both TG ports and return their handles."""
    # Options shared by both interface configurations.
    common = dict(mode='config', arp_send_req='1', count='10', gateway_step='0.0.0.0')
    st.log("TG1 {} IPv4 address {} config".format(vars.T1D1P1, data.in1_ip_addr_h[0]))
    inside_handle = tg1.tg_interface_config(port_handle=tg_ph_1, intf_ip_addr=data.in1_ip_addr_h[0],
                                            gateway=data.in1_ip_addr, netmask='255.255.0.0', **common)
    st.log("TG2 {} IPv4 address {} config".format(vars.T1D1P2, data.out_ip_addr_h))
    outside_handle = tg2.tg_interface_config(port_handle=tg_ph_2, intf_ip_addr=data.out_ip_addr_h,
                                             gateway=data.out_ip_addr_l[0], netmask='255.255.255.0', **common)
    return inside_handle, outside_handle
def util_tg_stream_config(tg1, tg2, tg_ph_1, tg_ph_2):
    """Create every NAT test stream on both TG ports.

    Returns a nested dict ``{"tg1": {...}, "tg2": {...}}`` mapping stream
    names to the TG stream ids. tg1 streams originate on the inside port,
    tg2 streams on the outside port; every stream targets the DUT router
    MAC (``dut1_rt_int_mac``).
    """
    result = {"tg1": {}, "tg2": {}}
    st.log("TG1 Stream config")
    # Static NAT (dnat): inside host 0 -> global address.
    tg1_st_nat_dnat_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='single_burst',
                                                     pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic,
                                                     l3_protocol='ipv4', mac_src=data.tg1_src_mac_addr, mac_dst=dut1_rt_int_mac,
                                                     ip_src_addr=data.in1_ip_addr_h[0], ip_dst_addr=data.global_ip_addr)
    result["tg1"]["tg1_st_nat_dnat_data_str_id_1"] = tg1_st_nat_dnat_data_str['stream_id']
    # Static NAPT over TCP: inside host 1 with the configured local TCP ports.
    tg1_st_napt_tcp_dnat_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='single_burst',
                                                          pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic,
                                                          l3_protocol='ipv4', mac_src=data.tg1_src_mac_addr,
                                                          mac_dst=dut1_rt_int_mac,
                                                          ip_src_addr=data.in1_ip_addr_h[1], ip_dst_addr=data.global_ip_addr, l4_protocol='tcp',
                                                          tcp_src_port=data.tcp_src_local_port, tcp_dst_port=data.tcp_dst_local_port)
    result["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"] = tg1_st_napt_tcp_dnat_data_str['stream_id']
    # Static NAT (snat): traffic sourced from the static global address.
    tg1_st_nat_snat_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='single_burst',
                                                     pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, l3_protocol='ipv4',
                                                     mac_src=data.tg1_src_mac_addr, mac_dst=dut1_rt_int_mac,
                                                     ip_src_addr=data.s_global_ip, ip_dst_addr=data.global_ip_addr)
    result["tg1"]["tg1_st_nat_snat_data_str_id_1"] = tg1_st_nat_snat_data_str['stream_id']
    # Static NAPT over UDP: inside host 2 with the configured local UDP ports.
    tg1_st_napt_udp_dnat_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='single_burst',
                                                          pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic,
                                                          l3_protocol='ipv4', mac_src=data.tg1_src_mac_addr,
                                                          mac_dst=dut1_rt_int_mac,
                                                          ip_src_addr=data.in1_ip_addr_h[2],
                                                          ip_dst_addr=data.global_ip_addr, l4_protocol='udp',
                                                          udp_src_port=data.udp_src_local_port,
                                                          udp_dst_port=data.udp_dst_local_port)
    result["tg1"]["tg1_st_napt_udp_dnat_data_str_id_1"] = tg1_st_napt_udp_dnat_data_str['stream_id']
    # Twice-NAT: inside host 3 toward the twice-nat test address.
    tg1_st_nat_twicenat_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='single_burst', pkts_per_burst=data.pkt_count,
                                                         rate_pps=data.rate_traffic, l3_protocol='ipv4',
                                                         mac_src=data.tg1_src_mac_addr, mac_dst=dut1_rt_int_mac,
                                                         ip_src_addr=data.in1_ip_addr_h[3], ip_dst_addr=data.tw_test_ip_addr)
    result["tg1"]["tg1_st_nat_twicenat_data_str_id_1"] = tg1_st_nat_twicenat_data_str['stream_id']
    # Dynamic NAPT learner: last inside host with the local UDP port pair.
    tg1_dyn_nat_udp_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create',
                                                     transmit_mode='single_burst',
                                                     pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic,
                                                     l3_protocol='ipv4', mac_src=data.tg1_src_mac_addr,
                                                     mac_dst=dut1_rt_int_mac,
                                                     ip_src_addr=data.in1_ip_addr_h[-1],
                                                     ip_dst_addr=data.global_ip_addr, l4_protocol='udp',
                                                     udp_src_port=data.local_src_port[0],
                                                     udp_dst_port=data.local_dst_port[0])
    result["tg1"]["tg1_dyn_nat_udp_data_str_id_1"] = tg1_dyn_nat_udp_data_str['stream_id']
    st.log("TG2 Stream config")
    # Reverse-direction counterparts of the tg1 streams above.
    tg2_st_nat_dnat_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', transmit_mode='single_burst',
                                                     pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, l3_protocol='ipv4',
                                                     mac_src=data.tg2_src_mac_addr, mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr,
                                                     ip_dst_addr=data.out_ip_addr_l[0])
    result["tg2"]["tg2_st_nat_dnat_data_str_id_1"] = tg2_st_nat_dnat_data_str['stream_id']
    tg2_st_napt_tcp_dnat_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', transmit_mode='single_burst',
                                                          pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr,
                                                          mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, ip_dst_addr=data.out_ip_addr_l[1],
                                                          l4_protocol='tcp', tcp_src_port=data.tcp_dst_global_port, tcp_dst_port=data.tcp_src_global_port)
    result["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"] = tg2_st_napt_tcp_dnat_data_str['stream_id']
    tg2_st_nat_snat_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', transmit_mode='single_burst',
                                                     pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr,
                                                     mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, ip_dst_addr=data.s_local_ip)
    result["tg2"]["tg2_st_nat_snat_data_str_id_1"] = tg2_st_nat_snat_data_str['stream_id']
    tg2_st_napt_udp_dnat_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', transmit_mode='single_burst',
                                                          pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr,
                                                          mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, ip_dst_addr=data.out_ip_addr_l[2],
                                                          l4_protocol='udp', udp_src_port=data.udp_dst_global_port, udp_dst_port=data.udp_src_global_port)
    result["tg2"]["tg2_st_napt_udp_dnat_data_str_id_1"] = tg2_st_napt_udp_dnat_data_str['stream_id']
    tg2_st_nat_twicenat_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', transmit_mode='single_burst', pkts_per_burst=data.pkt_count,
                                                         rate_pps=data.rate_traffic, l3_protocol='ipv4',
                                                         mac_src=data.tg2_src_mac_addr, mac_dst=dut1_rt_int_mac,
                                                         ip_src_addr=data.tw_global_ip_addr, ip_dst_addr=data.out_ip_addr_l[3])
    result["tg2"]["tg2_st_nat_twicenat_data_str_id_1"] = tg2_st_nat_twicenat_data_str['stream_id']
    # Four dynamic-NAPT reply streams covering the {pool ip} x {dst port}
    # combinations (see tg2_str_selector for the mapping).
    tg2_dyn_nat_udp_1_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create',
                                                       transmit_mode='single_burst',
                                                       pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic,
                                                       l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr,
                                                       mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr,
                                                       ip_dst_addr=data.out_ip_pool[0], l4_protocol='udp', udp_src_port=data.global_src_port[0],
                                                       udp_dst_port=data.global_dst_port[0])
    result["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"] = tg2_dyn_nat_udp_1_data_str['stream_id']
    tg2_dyn_nat_udp_2_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create',
                                                       transmit_mode='single_burst',
                                                       pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic,
                                                       l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr,
                                                       mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr,
                                                       ip_dst_addr=data.out_ip_pool[0], l4_protocol='udp', udp_src_port=data.global_src_port[0],
                                                       udp_dst_port=data.global_dst_port[1])
    result["tg2"]["tg2_dyn_nat_udp_2_data_str_id_1"] = tg2_dyn_nat_udp_2_data_str['stream_id']
    tg2_dyn_nat_udp_3_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create',
                                                       transmit_mode='single_burst',
                                                       pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic,
                                                       l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr,
                                                       mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr,
                                                       ip_dst_addr=data.out_ip_pool[1], l4_protocol='udp', udp_src_port=data.global_src_port[0],
                                                       udp_dst_port=data.global_dst_port[0])
    result["tg2"]["tg2_dyn_nat_udp_3_data_str_id_1"] = tg2_dyn_nat_udp_3_data_str['stream_id']
    tg2_dyn_nat_udp_4_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create',
                                                       transmit_mode='single_burst',
                                                       pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic,
                                                       l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr,
                                                       mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr,
                                                       ip_dst_addr=data.out_ip_pool[1], l4_protocol='udp', udp_src_port=data.global_src_port[0],
                                                       udp_dst_port=data.global_dst_port[1])
    result["tg2"]["tg2_dyn_nat_udp_4_data_str_id_1"] = tg2_dyn_nat_udp_4_data_str['stream_id']
    return result
def tg2_str_selector(trn_ip, trn_port):
    """Return the tg2 stream id whose (pool ip, dst port) pair matches the
    given translated ip/port; implicitly returns None when nothing matches."""
    pool_a, pool_b = data.out_ip_pool[0], data.out_ip_pool[1]
    port_a, port_b = data.global_dst_port[0], data.global_dst_port[1]
    stream_map = {
        tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"]: [pool_a, port_a],
        tg_str_data["tg2"]["tg2_dyn_nat_udp_2_data_str_id_1"]: [pool_a, port_b],
        tg_str_data["tg2"]["tg2_dyn_nat_udp_3_data_str_id_1"]: [pool_b, port_a],
        tg_str_data["tg2"]["tg2_dyn_nat_udp_4_data_str_id_1"]: [pool_b, port_b],
    }
    for stream_id, pair in stream_map.items():
        if pair == [trn_ip, trn_port]:
            return stream_id
def util_pkt_cap(tg_tx, tg_rx, tx_p_h, rx_p_h, str_h, offset_list=None, value_list=None):
    """Run stream ``str_h`` on the TX port while capturing on the RX port,
    then validate the capture: the byte at each offset in ``offset_list``
    must equal the corresponding hex string in ``value_list``.

    :return: validation result from ``tgapi.validate_packet_capture``.
    """
    # None defaults instead of the original mutable [] defaults (shared
    # across calls in CPython).
    offset_list = [] if offset_list is None else offset_list
    value_list = [] if value_list is None else value_list
    tg_rx.tg_packet_control(port_handle=rx_p_h, action='start')
    tg_tx.tg_traffic_control(action='run', handle=str_h)
    tg_rx.tg_packet_control(port_handle=rx_p_h, action='stop')
    tg_tx.tg_traffic_control(action='stop', handle=str_h)
    pkt_cap = tg_rx.tg_packet_stats(port_handle=rx_p_h, format='var', output_type='hex')
    st.log(pkt_cap)
    pkt_cap_res = tgapi.validate_packet_capture(tg_type=tg_rx.tg_type, pkt_dict=pkt_cap,
                                                offset_list=offset_list, value_list=value_list)
    return pkt_cap_res
def util_ip_addr_to_hexa_conv(ipaddr):
    """Return a dotted-quad IPv4 address as an 8-char uppercase hex string.

    On Python 3 ``hexlify`` returns ``bytes``; decode to ``str`` so the
    result can be compared against the plain ``str`` hex values that
    callers place alongside it in ``value_list`` (e.g. '11').
    """
    packed = inet_aton(ipaddr)
    ipaddr_h = hexlify(packed).decode('ascii').upper()
    return ipaddr_h
def util_int_to_hexa_conv(trn_src_port):
    """Return a non-negative port number (int or numeric string) as an
    uppercase hex string, zero-padded to at least four characters."""
    port = int(trn_src_port)
    return format(port, 'X').zfill(4)
def util_nat_debug_fun():
    """Dump interface counters, NAT translations, IP addressing and ARP
    state from the DUT to aid failure analysis."""
    st.banner("Collecting the needed debug info for failure analysis", width=100)
    collectors = (intfapi.show_interface_counters_all,
                  natapi.show_nat_translations,
                  ipapi.get_interface_ip_address,
                  arpapi.show_arp)
    for collector in collectors:
        collector(vars.D1)
|
"""This module contains auxiliary functions for the test runs."""
import shlex
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import copy
from trempy.shared.shared_auxiliary import get_random_string
from trempy.shared.shared_auxiliary import print_init_dict
from trempy.config_trempy import PREFERENCE_PARAMETERS
from trempy.custom_exceptions import TrempyError
from trempy.config_trempy import DEFAULT_BOUNDS
from trempy.config_trempy import HUGE_FLOAT
def get_random_init(constr=None):
    """Create a random init dictionary, write it to disk and return it.

    The original docstring claimed the dictionary was only printed; it is
    also written via ``print_init_dict`` and returned.

    Parameters
    ----------
    constr : Optional[dict]
        Constraints forwarded to ``random_dict``; the optional 'fname'
        key selects the output file name.
    """
    constr = dict() if constr is None else constr
    init_dict = random_dict(constr)
    if 'fname' in constr:
        print_init_dict(init_dict, constr['fname'])
    else:
        print_init_dict(init_dict)
    return init_dict
def random_dict(constr):
    """Create a random initialization dictionary.

    Parameters
    ----------
    constr : dict
        Constraints that pin individual entries; recognized keys include
        'version', 'all_questions', 'fname', 'discounting',
        'heterogeneity', 'maxfun', 'num_agents', 'est_file', 'detailed',
        'start' and 'optimizer'.

    Returns
    -------
    dict
        Init dictionary with VERSION, parameter, QUESTIONS, CUTOFFS,
        SIMULATION, ESTIMATION and optimizer sections.
    """
    dict_ = dict()
    version = np.random.choice(['scaled_archimedean', 'nonstationary'])
    num_questions = np.random.randint(8, 14)
    fname = get_random_string()
    discounting = np.random.choice([None, 'exponential', 'hyperbolic'], p=[0.8, 0.1, 0.1])
    heterogeneity = np.random.choice([True, False], p=[0.1, 0.9])
    df_other = np.random.choice(
        ['equal_univariate', 'free', 'linear', 'exponential'], p=[0.7, 0.1, 0.1, 0.1]
    )
    if constr is not None:
        # Handle version specific data.
        if 'version' in constr:
            version = constr['version']
        if 'all_questions' in constr:
            num_questions = 45
        if 'fname' in constr:
            fname = constr['fname']
        if 'discounting' in constr:
            discounting = constr['discounting']
        if 'heterogeneity' in constr:
            heterogeneity = constr['heterogeneity']
    dict_['VERSION'] = {'version': version}
    # Optional arguments for model type.
    if version in ['nonstationary']:
        dict_['VERSION']['stationary_model'] = np.random.choice([False, True], p=[0.9, 0.1])
        dict_['VERSION']['heterogeneity'] = heterogeneity
        dict_['VERSION']['discounting'] = discounting
        dict_['VERSION']['df_other'] = df_other
    elif version in ['scaled_archimedean']:
        dict_['VERSION']['stationary_model'] = True
        dict_['VERSION']['heterogeneity'] = False
        dict_['VERSION']['discounting'] = None
        dict_['VERSION']['df_other'] = 'equal_univariate'
        heterogeneity = False
    sim_agents = np.random.randint(2, 10)
    is_fixed = np.random.choice(
        [True, False], size=num_questions + len(PREFERENCE_PARAMETERS[version]))
    # We need to ensure at least one parameter is free for a valid estimation
    # request. BUG FIX: the original counted the *string* 'False' in a list of
    # bools (always zero) and then assigned the truthy string 'False'.
    if is_fixed.all():
        is_fixed[0] = False
    # Bounds and values. Be careful: the order of labels matters!
    bounds = [get_bounds(label, version) for label in PREFERENCE_PARAMETERS[version]]
    values = [get_value(bounds[i], label, version)
              for i, label in enumerate(PREFERENCE_PARAMETERS[version])]
    if version in ['scaled_archimedean']:
        # Initial setup to ensure constraints across options.
        marginals = np.random.choice(['exponential', 'power'], 2)
        upper_bounds = np.random.randint(500, 800 + 1, 2)
        # We start with sampling all preference parameters.
        dict_['UNIATTRIBUTE SELF'], i = dict(), 0
        dict_['UNIATTRIBUTE SELF']['r'] = [values[i], is_fixed[i], bounds[i]]
        dict_['UNIATTRIBUTE SELF']['max'] = upper_bounds[i]
        dict_['UNIATTRIBUTE SELF']['marginal'] = marginals[i]
        dict_['UNIATTRIBUTE OTHER'], i = dict(), 1
        dict_['UNIATTRIBUTE OTHER']['r'] = [values[i], is_fixed[i], bounds[i]]
        dict_['UNIATTRIBUTE OTHER']['max'] = upper_bounds[i]
        dict_['UNIATTRIBUTE OTHER']['marginal'] = marginals[i]
        dict_['MULTIATTRIBUTE COPULA'] = dict()
        for i, label in enumerate(['delta', 'self', 'other']):
            # We increment the index because (r_self, r_other) are handled above.
            j = i + 2
            dict_['MULTIATTRIBUTE COPULA'][label] = [values[j], is_fixed[j], bounds[j]]
    elif version in ['nonstationary']:
        dict_['ATEMPORAL'] = dict()
        dict_['DISCOUNTING'] = dict()
        for i, label in enumerate(PREFERENCE_PARAMETERS[version]):
            if label in ['alpha', 'beta', 'gamma', 'y_scale']:
                dict_['ATEMPORAL'][label] = [values[i], is_fixed[i], bounds[i]]
            else:
                dict_['DISCOUNTING'][label] = [values[i], is_fixed[i], bounds[i]]
        # Handle optional arguments. If one argument is not used, set all to
        # None and fix them.
        optional_args = ['unrestricted_weights_{}'.format(int(x)) for x in [0, 1, 3, 6, 12, 24]]
        if df_other in ['equal_univariate']:
            for label in optional_args:
                dict_['DISCOUNTING'][label] = [None, True, [0.01, 1.00]]
        elif df_other in ['free']:
            pass
        elif df_other in ['linear', 'exponential']:
            # BUG FIX: the original referenced the stale loop variable `label`
            # from the parameter loop above instead of iterating here; fix all
            # optional weights except the first ('..._0').
            for label in optional_args:
                if not label.endswith('_0'):
                    dict_['DISCOUNTING'][label] = [None, True, [0.01, 1.00]]
        else:
            raise TrempyError('version not implemented')
    # General part of the init file that does not change with the version.
    if num_questions >= 45:
        questions = list(range(1, 46))
    else:
        if version in ['scaled_archimedean']:
            # Only atemporal questions exist for scaled_archimedean.
            questions = np.random.choice(
                [13] + list(range(31, 46)), size=num_questions, replace=False)
        else:
            if heterogeneity:
                # Questions 1 and 2 are mandatory under heterogeneity.
                questions = np.array([1, 2])
                questions = np.append(questions, np.random.choice(
                    list(range(3, 46)), size=(num_questions - 2), replace=False))
            else:
                questions = np.random.choice(list(range(1, 46)), size=num_questions, replace=False)
    dict_['QUESTIONS'] = dict()
    for i, q in enumerate(questions):
        bounds = get_bounds(q, version)
        value = get_value(bounds, q, version)
        dict_['QUESTIONS'][q] = [value, is_fixed[i + len(PREFERENCE_PARAMETERS[version])], bounds]
    # If heterogeneity is True, we want to unfix the first two questions and fix the rest.
    if heterogeneity:
        dict_['QUESTIONS'][1][1] = False
        dict_['QUESTIONS'][2][1] = False
        for q in questions:
            if q in [1, 2]:
                continue
            dict_['QUESTIONS'][q] = [0.5, True, [0, HUGE_FLOAT]]
    # We now add some cutoff values.
    dict_['CUTOFFS'] = dict()
    for q in questions:
        if np.random.choice([True, False]):
            dict_['CUTOFFS'][q] = get_cutoffs()
    # We now turn to all simulation details.
    dict_['SIMULATION'] = dict()
    dict_['SIMULATION']['agents'] = sim_agents
    dict_['SIMULATION']['seed'] = np.random.randint(1, 1000)
    dict_['SIMULATION']['file'] = fname
    # We sample valid estimation requests (est_agents < sim_agents, so the
    # randint bounds below are always valid).
    est_agents = np.random.randint(1, sim_agents)
    num_skip = np.random.randint(0, sim_agents - est_agents)
    dict_['ESTIMATION'] = dict()
    dict_['ESTIMATION']['optimizer'] = np.random.choice(
        ['SCIPY-BFGS', 'SCIPY-L-BFGS-B', 'SCIPY-POWELL'])
    dict_['ESTIMATION']['detailed'] = np.random.choice([True, False], p=[0.9, 0.1])
    dict_['ESTIMATION']['start'] = np.random.choice(['init', 'auto'])
    dict_['ESTIMATION']['agents'] = est_agents
    dict_['ESTIMATION']['skip'] = num_skip
    dict_['ESTIMATION']['maxfun'] = np.random.randint(1, 10)
    dict_['ESTIMATION']['file'] = fname + '.trempy.pkl'
    # We sample optimizer options.
    dict_['SCIPY-BFGS'] = dict()
    dict_['SCIPY-BFGS']['gtol'] = np.random.lognormal()
    dict_['SCIPY-BFGS']['eps'] = np.random.lognormal()
    dict_['SCIPY-L-BFGS-B'] = dict()
    dict_['SCIPY-L-BFGS-B']['gtol'] = np.random.lognormal()
    dict_['SCIPY-L-BFGS-B']['ftol'] = np.random.lognormal()
    dict_['SCIPY-L-BFGS-B']['eps'] = np.random.lognormal()
    dict_['SCIPY-POWELL'] = dict()
    dict_['SCIPY-POWELL']['xtol'] = np.random.lognormal()
    dict_['SCIPY-POWELL']['ftol'] = np.random.lognormal()
    # Now we need to impose possible constraints.
    if constr is not None:
        if 'maxfun' in constr:
            dict_['ESTIMATION']['maxfun'] = constr['maxfun']
        if 'num_agents' in constr:
            dict_['SIMULATION']['agents'] = constr['num_agents']
            dict_['ESTIMATION']['agents'] = constr['num_agents']
            dict_['ESTIMATION']['skip'] = 0
        if 'est_file' in constr:
            dict_['ESTIMATION']['file'] = constr['est_file']
        if 'detailed' in constr:
            dict_['ESTIMATION']['detailed'] = constr['detailed']
        if 'start' in constr:
            dict_['ESTIMATION']['start'] = constr['start']
        if 'optimizer' in constr:
            dict_['ESTIMATION']['optimizer'] = constr['optimizer']
    return dict_
def get_rmse():
    """Return the RMSE reported in 'compare.trempy.info'.

    The whole file is scanned so the last RMSE line wins, matching the
    original behavior. The value is returned as float, except the
    placeholder '---' which is passed through unchanged. Returns None
    when no RMSE line is present (the original raised a confusing
    NameError in that case).
    """
    stat = None
    with open('compare.trempy.info') as in_file:
        # Iterate the file lazily instead of materializing readlines().
        for line in in_file:
            if 'RMSE' in line:
                stat = shlex.split(line)[1]
                if stat not in ['---']:
                    stat = float(stat)
    return stat
def get_bounds(label, version):
    """Return a set of valid bounds tailored for each parameter.

    The single np.random.uniform draw per call and the two subsequent
    np.random.choice draws are kept in the original order so the random
    stream is consumed identically.
    """
    wedge = float(np.random.uniform(0.03, 0.50))
    is_question = label in list(range(1, 46))
    # Draw the lower bound from a range that depends on the parameter type.
    if is_question:
        lower = float(np.random.uniform(0.01, 0.98 - wedge))
    elif version in ['scaled_archimedean']:
        if label in ['r_self', 'r_other']:
            lower = float(np.random.uniform(0.01, 5.0 - wedge))
        elif label in ['delta', 'self', 'other']:
            lower = float(np.random.uniform(0.01, 0.98 - wedge))
        else:
            raise TrempyError('flawed request for bounds')
    elif version in ['nonstationary']:
        if label in ['alpha', 'beta', 'gamma']:
            lower = float(np.random.uniform(0.01, 5.0 - wedge))
        elif label in ['y_scale'] or label.startswith(('discount_factors', 'unrestricted_weights')):
            lower = float(np.random.uniform(0.01, 0.98 - wedge))
        else:
            raise TrempyError('flawed request for bounds')
    else:
        raise TrempyError('version not implemented')
    # The upper bound sits one wedge above the (pre-reset) lower bound.
    upper = lower + wedge
    # Discount factors always start at zero, covering exponential and
    # hyperbolic discounting.
    if not is_question and label.startswith('discount_factors'):
        lower = 0.00
    # Occasionally fall back to the default bounds to exercise that case too.
    if np.random.choice([True, False], p=[0.1, 0.9]):
        lower = DEFAULT_BOUNDS[label][0]
    if np.random.choice([True, False], p=[0.1, 0.9]):
        upper = DEFAULT_BOUNDS[label][1]
    return [np.around(float(bound), decimals=4) for bound in (lower, upper)]
def get_value(bounds, label, version):
    """Return a value for the parameter that honors the given bounds."""
    lower, upper = bounds
    if label not in PREFERENCE_PARAMETERS[version]:
        # Non-preference (question) labels are capped at ten.
        upper = min(upper, 10)
    draw = float(np.random.uniform(lower + 0.01, upper - 0.01))
    return np.around(draw, decimals=4)
def get_cutoffs():
    """Return a valid pair of cutoff values (lower, upper)."""
    # Draw the finite candidates first so the RNG sequence matches the
    # original implementation exactly.
    lower = np.random.uniform(-5.0, -0.01)
    upper = np.random.uniform(0.01, 5.0)
    low_cutoff = np.random.choice([lower, -HUGE_FLOAT], p=[0.8, 0.2])
    high_cutoff = np.random.choice([upper, HUGE_FLOAT], p=[0.8, 0.2])
    return [np.around(cutoff, decimals=4) for cutoff in (low_cutoff, high_cutoff)]
def visualize_modelfit(df_simulated, df_estimated):
    """Compare the distribution of choices per question before and after estimation.

    Builds side-by-side violin plots (risky questions left, temporal
    questions right) and saves them to 'compare_datasets.png'.
    # NOTE(review): assumes both frames have 'Question'/'Individual' index
    # levels and a 'Compensation' column — confirm against the simulate output.
    """
    # Remove multiindex and merge dataframes.
    df_start = copy.deepcopy(df_simulated)
    df_stop = copy.deepcopy(df_estimated)
    df_start.drop(columns=['Question', 'Individual'], inplace=True)
    df_start.reset_index(inplace=True)
    df_start['type'] = 'Simulated'
    df_start.head()
    df_stop.drop(columns=['Question', 'Individual'], inplace=True)
    df_stop.reset_index(inplace=True)
    df_stop['type'] = 'Estimated'
    df_stop.head()
    df = pd.concat([df_start, df_stop], ignore_index=True)
    del df_stop, df_start
    df = df.pivot_table(values='Compensation', index=['Individual'], columns=['Question', 'type'])
    df.reset_index(inplace=True, drop=True)
    # Drop never switchers (encoded as compensations >= 9999).
    df[df >= 9999.0] = np.nan
    # Rename columns: ('<question>', '<type>') -> '<type>: <question>'.
    level0 = df.columns.get_level_values(0).astype(str)
    level1 = df.columns.get_level_values(1).astype(str)
    levels = zip(level1, level0)
    df.columns = [': '.join(col).strip() for col in levels]
    # Drop neverswitchers and drop columns with too few interior observations.
    for col in df.columns.tolist():
        # Make sure we haven't dropped the column already (the membership
        # test re-reads df.columns, which shrinks while we drop).
        if col not in df.columns.tolist():
            continue
        # Calculate percent interior.
        neverswitcher = df[col].isna().sum()
        interior = df.shape[0] - neverswitcher
        percent_interior = (interior / df.shape[0])
        if percent_interior <= 0.20:
            _, question = col.split(": ")
            print('Dropped question: {}'.format(question))
            print('Interior: {}'.format(percent_interior))
            # Drop both variants; one may be absent from the estimation file.
            try:
                df.drop('Simulated: ' + question, axis=1, inplace=True)
                df.drop('Estimated: ' + question, axis=1, inplace=True)
            except Exception:
                print('Question {} was not in estimation file.'.format(question))
    # Split columns by question number via regex on the column names.
    df_temporal = df.filter(regex='(Estimated|Simulated): ([1-9]$)|(([1-2][0-9])|30)')
    df_risky = df.filter(regex='(Estimated|Simulated): ((3[1-9])|([4-5][0-9]))')
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 10), sharex=False, sharey=False)
    for row, data in enumerate([df_risky, df_temporal]):
        # Prepare plot: one array of non-NaN observations per column.
        pos = np.arange(data.shape[1])
        ylabels = data.columns
        data = [data.values[:, i] for i in range(data.shape[1])]
        data = [x[~np.isnan(x)] for x in data]
        # Keep only nonempty arrays.
        # NOTE(review): pos keeps its pre-filter length, so it can mismatch
        # data/ylabels if any column became empty — confirm intended.
        nonempty = [(x.size != 0) for x in data]
        data = [x for x, y in zip(data, nonempty) if y is True]
        ylabels = [x for x, y in zip(ylabels, nonempty) if y is True]
        # Plot
        violin = axes[row].violinplot(
            data, pos, points=100, vert=False, widths=1.0,
            showmeans=True, showextrema=True, showmedians=False,
            bw_method='silverman')
        if row == 0:
            axes[row].set_title('Risky', fontsize=16)
        else:
            axes[row].set_title('Temporal', fontsize=16)
        axes[row].set_yticks(np.arange(0, len(pos), 1.0))
        axes[row].set_yticklabels(ylabels)
        axes[row].set_xlabel('Euro')
        # Set color of mean.
        vp = violin['cmeans']
        vp.set_edgecolor('black')
        vp.set_linewidth(2)
        # Color every other body red so simulated and estimated columns
        # (which alternate) are visually distinguishable.
        index = 0
        for pc in violin['bodies']:
            if index % 2 == 0:
                pc.set_facecolor('red')
            else:
                pass
            index += 1
    fig.suptitle("Distribution of Choices", fontsize=20)
    # fig.subplots_adjust(hspace=0.4)
    # Save output
    plt.savefig('compare_datasets.png')
|
import logging
# Module-level cache for the shared logger instance.
_LOGGER = None


def get_logger() -> logging.Logger:
    """Return the shared 'cache_gs' logger, creating it on first use."""
    global _LOGGER
    if _LOGGER is None:
        logging.info('cache_gs logging init')
        _LOGGER = logging.getLogger('cache_gs')
    return _LOGGER
|
"""Provides utilities for working with FampPlex entities and relations
FamPlex is an ontology of protein families and complexes. Individual terms
are genes/proteins. There are higher level terms for families and
complexes. Terms can be connected by isa or partof relationships.
X isa Y expressing that X is a member of the Y family; Z partof W
expressing that Z is a constituent of the W complex.
Each term in the FamPlex ontology exists within a namespace and has an
identifier which is unique within that namespace. Individual genes and
proteins have either HGNC or Uniprot as a namespace. FamPlex has its own
namespace for families and complexes and the unique identifiers are
designed to be human readable. Identifiers for Uniprot are simply Uniprot
IDs. For HGNC the HGNC Symbol is used instead of the HGNC unique ID.
If X isa Y or X partof Y we say that X is a child of Y and Y is a parent of
X. We say Y is above X in the FamPlex ontology if there is a path of isa and
partof edges from X to Y. We also say that Y is an ancestor of X.
X is then below Y in the FamPlex ontology and we also say X is a descendant
of Y.
"""
import warnings
from typing import Container, Dict, List, Optional, Tuple
from famplex.graph import FamplexGraph
__all__ = ['in_famplex', 'parent_terms', 'child_terms', 'root_terms',
'ancestral_terms', 'descendant_terms', 'individual_members', 'isa',
'partof', 'refinement_of', 'dict_representation', 'equivalences',
'reverse_equivalences', 'all_root_terms']
# Build the shared ontology graph once at import time; every public function
# below delegates to this singleton.
try:
    _famplex_graph = FamplexGraph()
except FileNotFoundError:
    # NOTE(review): when this fires, _famplex_graph is left undefined, so any
    # subsequent API call raises NameError — the warning is only advisory.
    warnings.warn(
        "Resource files are unavailable. If you've cloned this repository, "
        "run the script \"update_resources.py\" at the top level to move the "
        "resources into the package. See the README for more info.",
        Warning)
def in_famplex(namespace: str, id_: str) -> bool:
    """Check whether (namespace, id_) is a term in the FamPlex ontology.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' for FamPlex, or 'UP'
        for Uniprot.
    id_ : str
        Identifier for a term within namespace.

    Returns
    -------
    bool
    """
    is_member = _famplex_graph.in_famplex(namespace, id_)
    return is_member
def parent_terms(namespace: str, id_: str,
                 relation_types: Optional[Container[str]] = None) \
        -> List[Tuple[str, str]]:
    """Return the terms immediately above a term in the FamPlex ontology.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' for FamPlex, or 'UP'
        for Uniprot.
    id_ : str
        Identifier for a term within namespace.
    relation_types : Optional[list]
        Relation types ('isa' and/or 'partof') the input term may have
        with the returned parents. None means no restriction.
        Default: None

    Returns
    -------
    list
        (namespace, id) tuples for each parent term, sorted case
        insensitively first by namespace and then by id.

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    allowed = ['isa', 'partof'] if relation_types is None else relation_types
    parents = []
    for parent_ns, parent_id, relation in \
            _famplex_graph.parent_edges(namespace, id_):
        if relation in allowed:
            parents.append((parent_ns, parent_id))
    return parents
def child_terms(namespace: str, id_: str,
                relation_types:
                Optional[Container[str]] = None) -> \
        List[Tuple[str, str]]:
    """Return the terms immediately below a term in the FamPlex ontology.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' for FamPlex, or 'UP'
        for Uniprot.
    id_ : str
        Identifier for a term within namespace.
    relation_types : Optional[list]
        Relation types ('isa' and/or 'partof') to include. None means both
        isa and partof edges are included. Default: None

    Returns
    -------
    list
        (namespace, id) tuples for each child term, sorted case
        insensitively first by namespace and then by id.

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    allowed = ['isa', 'partof'] if relation_types is None else relation_types
    children = []
    for child_ns, child_id, relation in \
            _famplex_graph.child_edges(namespace, id_):
        if relation in allowed:
            children.append((child_ns, child_id))
    return children
def root_terms(namespace: str, id_: str) -> List[Tuple[str, str]]:
    """Return the top-level terms above the input term.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' for FamPlex, or 'UP'
        for Uniprot.
    id_ : str
        Identifier for a term within namespace. See the FamplexGraph
        class docstring for more info.

    Returns
    -------
    list
        Terms above the input that are top-level families and/or complexes
        within the FamPlex ontology, sorted case insensitively first by
        namespace and then by id.

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    roots = _famplex_graph.root_terms(namespace, id_)
    return roots
def ancestral_terms(namespace: str, id_: str,
                    relation_types:
                    Optional[Container[str]] = None) -> \
        List[Tuple[str, str]]:
    """Return every term above a given term in the FamPlex ontology.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' for FamPlex, or 'UP'
        for Uniprot.
    id_ : str
        Identifier for a term within namespace. See the FamplexGraph
        class docstring for more info.
    relation_types : Optional[list]
        Relation types ('isa' and/or 'partof') to follow. None means both
        kinds of edges are traversed. Default: None

    Returns
    -------
    list
        Terms in breadth-first order following relations upward from
        bottom to top. Edges from the same node are traversed in case
        insensitive alphabetical order, first by namespace and then by id
        of the target node.

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    _famplex_graph.raise_value_error_if_not_in_famplex(namespace, id_)
    rels = ['isa', 'partof'] if relation_types is None else relation_types
    visited = [(term_ns, term_id) for term_ns, term_id in
               _famplex_graph.traverse((namespace, id_), rels, 'up')]
    # traverse yields the starting term first; exclude it from the result.
    return visited[1:]
def descendant_terms(namespace: str, id_: str,
                     relation_types:
                     Optional[Container[str]] = None) -> \
        List[Tuple[str, str]]:
    """Return every term below a given term in the FamPlex ontology.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' for FamPlex, or 'UP'
        for Uniprot.
    id_ : str
        Identifier for a term within namespace. See the FamplexGraph
        class docstring for more info.
    relation_types : Optional[list]
        Relation types ('isa' and/or 'partof') to follow. None means both
        kinds of edges are traversed. Default: None

    Returns
    -------
    list
        Terms in breadth-first order following relations backwards from
        top to bottom. Edges from the same node are traversed in case
        insensitive alphabetical order, first by namespace and then by id
        of the target node.

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    _famplex_graph.raise_value_error_if_not_in_famplex(namespace, id_)
    rels = ['isa', 'partof'] if relation_types is None else relation_types
    visited = [(term_ns, term_id) for term_ns, term_id in
               _famplex_graph.traverse((namespace, id_), rels, 'down')]
    # traverse yields the starting term first; exclude it from the result.
    return visited[1:]
def individual_members(namespace: str, id_: str,
                       relation_types:
                       Optional[Container[str]] = None) -> \
        List[Tuple[str, str]]:
    """Return leaf terms (non-families, non-complexes) beneath a term.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' for FamPlex, or 'UP'
        for Uniprot.
    id_ : str
        Identifier for a term within namespace. See the FamplexGraph class
        docstring for more info.
    relation_types : list
        Relation types ('isa' and/or 'partof') to follow. None means both
        kinds of edges are traversed. Default: None

    Returns
    -------
    list
        Terms beneath the input term that have no children of their own.
        With only 'isa' these are the individual genes in a family; with
        only 'partof', the individual members of a complex. Terms that are
        families of complexes carry both edge kinds, so excluding one kind
        can leave families or complexes in the result. Values are sorted
        case insensitively, first by namespace and then by id.

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    if relation_types is None:
        relation_types = ['isa', 'partof']
    leaves = [term
              for term in descendant_terms(namespace, id_, relation_types)
              if not child_terms(*term, relation_types=relation_types)]
    leaves.sort(key=lambda term: (term[0].lower(), term[1].lower()))
    return leaves
def isa(namespace1: str, id1: str, namespace2: str, id2: str) -> bool:
    """Check whether the first term has an isa relationship with the second.

    Parameters
    ----------
    namespace1 : str
        Namespace of first term: one of 'HGNC', 'FPLX' for FamPlex, or
        'UP' for Uniprot.
    id1 : str
        Identifier of first term.
    namespace2 : str
        Namespace of second term: one of 'HGNC', 'FPLX' for FamPlex, or
        'UP' for Uniprot.
    id2 : str
        Identifier of second term.

    Returns
    -------
    bool
        True if (namespace1, id1) isa (namespace2, id2); False otherwise,
        including when either term is not in the FamPlex ontology.
    """
    relation_holds = _famplex_graph.relation(namespace1, id1,
                                             namespace2, id2, ['isa'])
    return relation_holds
def partof(namespace1: str, id1: str, namespace2: str, id2: str) -> bool:
    """Check whether the first term has a partof relationship with the second.

    Parameters
    ----------
    namespace1 : str
        Namespace of first term: one of 'HGNC', 'FPLX' for FamPlex, or
        'UP' for Uniprot.
    id1 : str
        Identifier of first term.
    namespace2 : str
        Namespace of second term: one of 'HGNC', 'FPLX' for FamPlex, or
        'UP' for Uniprot.
    id2 : str
        Identifier of second term.

    Returns
    -------
    bool
        True if (namespace1, id1) is partof (namespace2, id2); False
        otherwise, including when either term is not in the FamPlex
        ontology.
    """
    relation_holds = _famplex_graph.relation(namespace1, id1,
                                             namespace2, id2, ['partof'])
    return relation_holds
def refinement_of(namespace: str, id1: str, namespace2: str, id2: str) -> bool:
    """Return true if either an isa or a partof relationship holds.

    NOTE(review): unlike the sibling functions ``isa`` and ``partof`` the
    first parameter here is named ``namespace`` rather than ``namespace1``;
    renaming it would break keyword-argument callers, so only the docs are
    corrected to match the actual signature.

    Parameters
    ----------
    namespace : str
        Namespace of first term. This should be one of 'HGNC', 'FPLX' for
        FamPlex, or 'UP' for Uniprot.
    id1 : str
        Identifier of first term.
    namespace2 : str
        Namespace of second term. This should be one of 'HGNC', 'FPLX' for
        FamPlex, or 'UP' for Uniprot.
    id2 : str
        Identifier of second term.

    Returns
    -------
    bool
        True if the term given by (namespace, id1) has either an isa or
        partof relationship with the term given by (namespace2, id2). Will
        return False if either of (namespace, id1) or (namespace2, id2) is
        not in the FamPlex ontology.
    """
    return _famplex_graph.relation(namespace, id1,
                                   namespace2, id2, ['isa', 'partof'])
def dict_representation(namespace: str,
                        id_: str) -> Dict[Tuple[str, str],
                                          List[Tuple[dict, str]]]:
    """Return a nested dictionary representation of a FamPlex term.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' for FamPlex, or 'UP'
        for Uniprot.
    id_ : str
        Identifier for a term within namespace.

    Returns
    -------
    dict
        Nested dictionary representing the structure of a FamPlex term.
        Keys are (namespace, id) tuples; values are lists of
        (nested representation, relation) tuples. Edges are sorted in case
        insensitive alphabetical order, first by namespace and then by id
        of the target node, e.g.:

        {('FPLX', 'ESR'): [({('HGNC', 'ESR1'): []}, 'isa'),
                           ({('HGNC', 'ESR2'): []}, 'isa')]}

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    tree: Dict[Tuple[str, str], List[Tuple[dict, str]]] = \
        {(namespace, id_): []}
    # Recurse into every child edge; a leaf term simply keeps its empty list.
    for child_ns, child_id, relation in \
            _famplex_graph.child_edges(namespace, id_):
        subtree = dict_representation(child_ns, child_id)
        tree[(namespace, id_)].append((subtree, relation))
    return tree
def equivalences(fplx_id: str,
                 namespaces: Optional[Container[str]] = None) -> \
        List[Tuple[str, str]]:
    """Return list of equivalent terms from other namespaces.

    Parameters
    ----------
    fplx_id : str
        A valid Famplex ID
    namespaces : Optional[container]
        Namespaces to which the returned equivalences will be restricted.
        Can be used if one is interested in a particular type of
        equivalences.

    Returns
    -------
    list
        List of tuples of the form (namespace, id) of equivalent terms
        from other namespaces.

    Raises
    ------
    ValueError
        If fplx_id is not an ID in the FamPlex ontology.
    """
    equivs = _famplex_graph.equivalences(fplx_id)
    if namespaces is not None:
        equivs = [(namespace, id_) for namespace, id_ in equivs
                  if namespace in namespaces]
    return equivs
def reverse_equivalences(namespace: str, id_: str) -> List[str]:
    """Return FamPlex terms equivalent to a term from another namespace.

    Parameters
    ----------
    namespace : str
        Namespace of a term
    id_ : str
        id_ of a term

    Returns
    -------
    list
        FamPlex IDs for families or complexes equivalent to the term given
        by (namespace, id_).
    """
    fplx_ids = _famplex_graph.reverse_equivalences(namespace, id_)
    return fplx_ids
def all_root_terms() -> List[Tuple[str, str]]:
    """Return all top-level families and complexes in FamPlex.

    Returns
    -------
    list
        Tuples of the form ('FPLX', id) where id runs over all top-level
        families and complexes in FamPlex, in alphabetical order by id.
    """
    roots = _famplex_graph.root_classes
    return roots
|
from .slicer import Slicer, Alias, Obj
|
from time import gmtime
from sys import exit
from json import dumps
from os import path
import json
'''
Monty's Debug Library. Please note this software may be licenced differently
than the software that uses it.
Designed for use as a python import and should not be executed explicitly.
Copyright (C) 2018 Nicolas "Montessquio" Suarez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# Debugger class.
class logger:
    """Tiny leveled console-and-file logger.

    Logging levels: -1 disables everything; 0 = INFO, 1 = WARN, 2 = ERROR,
    3 = internal logs. FATAL messages trigger at every level except -1.
    Set ``doIL=True`` to emit internal logs without raising the overall
    level (basically a -v flag).
    """

    def __init__(self, level=0, doIL=False, quiet=False, oFile=None):
        # level: minimum severity gate (see class docstring).
        self.level = level
        # doIL: force internal logs on, independent of level.
        self.doIL = doIL
        # quiet: suppresses log() output only (historical behavior —
        # warn/error/fatal still print).
        self.quiet = quiet
        # Resolve the optional log-file path; any failure (e.g. oFile=None)
        # disables file output.
        try:
            self.oFile = path.abspath(oFile)
        except Exception:
            self.oFile = None

    def _emit(self, tag, msg, endl):
        """Clear the console line, print '[TAG][HH:MM:SS] msg', and append
        the same line to the log file when one is configured.

        Non-string messages are serialized with json.dumps, preserving the
        original try/except fallback behavior.
        """
        print(' ' * 100, end='\r')  # wipe any in-place progress output
        text = msg if isinstance(msg, str) else dumps(msg)
        line = "[" + tag + "][" + __ftime__() + "] " + text
        print(line, end=endl)
        if self.oFile is not None:
            try:
                with open(self.oFile, "a") as f:
                    f.write(line + endl)
            except Exception:
                print("[ERROR][" + __ftime__() + "] Unable to write to log file.", end=endl)

    # Standard logging level (0+).
    def log(self, msg, endl="\n"):
        if self.level >= 0 and not self.quiet:
            self._emit("INFO", msg, endl)

    # More serious logging level (1+).
    def warn(self, msg, endl="\n"):
        # BUG FIX: file entries were tagged [INFO]; they now carry [WARN].
        if self.level >= 1:
            self._emit("WARN", msg, endl)

    # Most serious recoverable logging level (2+).
    def error(self, msg, endl="\n"):
        # BUG FIX: the original referenced `self.self.oFile` (AttributeError)
        # and opened the file in "w" mode, truncating earlier entries; file
        # entries were also mislabeled [INFO].
        if self.level >= 2:
            self._emit("ERROR", msg, endl)

    # Exits after execution. Optional cleanup code string.
    def fatal(self, msg, methodCleanup="", endl="\n"):
        if self.level >= 0:
            self._emit("FATAL", msg, endl)
        # SECURITY/HACK: executes an arbitrary code string; kept for
        # backward compatibility, but callers should migrate to passing a
        # callable instead (as the original comment already warned).
        try:
            exec(methodCleanup)
        except Exception:
            pass
        exit()

    # fatals() reports a FATAL message but does not exit.
    # Should not be used; see error() instead.
    def fatals(self, msg, endl="\n"):
        if self.level >= 0:
            self._emit("FATAL", msg, endl)
        print("Continuing...")

    # Internal logger (level 3, or forced on via doIL).
    def ilog(self, msg, endl="\n"):
        # BUG FIX: file entries now include the line terminator (the
        # original omitted `endl` in the file write) and carry the [iLog]
        # tag instead of [INFO].
        if self.level >= 3 or self.doIL:
            self._emit("iLog", msg, endl)


# Internal Functions
# Internal function for getting the current UTC time string.
# BUG FIX: now zero-padded so the format really is HH:MM:SS as documented.
def __ftime__():
    now = gmtime()
    return "%02d:%02d:%02d" % (now.tm_hour, now.tm_min, now.tm_sec)
|
from Day3.Point import Point
def get_cell_power_level(pt, serial):
rack_id = pt.x + 10
power_level = rack_id * pt.y
power_level += serial
power_level *= rack_id
power_level_string = str(power_level)
if len(power_level_string) < 3:
power_level = 0
else:
power_level = int(power_level_string[len(power_level_string) - 3])
return power_level - 5
def get_nxn_grid_power_level(n, pt, serial=None):
    """Return the summed power level of the n x n square anchored at *pt*.

    Parameters
    ----------
    n : int
        Edge length of the square.
    pt : Point
        Top-left cell of the square.
    serial : int, optional
        Grid serial number. Defaults to the module-level ``serial_number``
        so existing callers keep their behavior; passing it explicitly
        removes the hidden dependency on the global.

    Returns
    -------
    int
        Total power of all n*n cells.
    """
    if serial is None:
        serial = serial_number
    total = 0
    for dx in range(n):
        for dy in range(n):
            total += get_cell_power_level(Point(pt.x + dx, pt.y + dy), serial)
    return total
# Driver: search the 300x300 grid for the 3x3 square with the largest total
# power (grid coordinates are 1-based).
highest_square_power_level = 0
highest_power_level_point = Point()
serial_number = 5153  # puzzle input serial number
grid_edge = 3
# Sanity checks for the cell power formula with known inputs/outputs.
assert get_cell_power_level(Point(3, 5), 8) == 4, 'Assertion #0 failed.'
assert get_cell_power_level(Point(122, 79), 57) == -5, 'Assertion #1 failed.'
assert get_cell_power_level(Point(217, 196), 39) == 0, 'Assertion #2 failed.'
assert get_cell_power_level(Point(101, 153), 71) == 4, 'Assertion #3 failed.'
# Every valid 3x3 top-left corner: x and y run 1..298 so x+2, y+2 <= 300.
for x in range(1, 301 - 2):
    for y in range(1, 301 - 2):
        current_point = Point(x, y)
        current_square_power_level = get_nxn_grid_power_level(grid_edge, current_point)
        if current_square_power_level > highest_square_power_level:
            highest_square_power_level = current_square_power_level
            highest_power_level_point = current_point
print(f'Top-left fuel cell of the 3x3 square with the largest total power is {highest_power_level_point}.')
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
# pylint: disable=missing-docstring
import os
import sys
sys.path.append('../../')
import time
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import input_test_data
import math
import numpy as np
from i3d_nonlocal import InceptionI3d
from i3d_utils import *
# Basic model parameters as external flags.
flags = tf.app.flags
# Number of GPUs the evaluation batch is sliced across (see run_training).
gpu_num = 4
flags.DEFINE_integer('batch_size', 8, 'Batch size.')
flags.DEFINE_integer('num_frame_per_clib', 64, 'Nummber of frames per clib')
flags.DEFINE_integer('crop_size', 256, 'Crop_size')
flags.DEFINE_integer('sample_rate', 8, 'Sample rate for clib')
flags.DEFINE_integer('rgb_channels', 3, 'Channels for input')
flags.DEFINE_integer('classics', 101, 'The num of class')
flags.DEFINE_integer('block_num', 0, 'The num of nonlocal block')
flags.DEFINE_float('weight_decay', 0.000001, 'weight decay')
FLAGS = flags.FLAGS
# Restrict TensorFlow to the second half of an 8-GPU machine.
os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7"
def run_training():
    """Evaluate a (non-local) I3D checkpoint over the test clip list.

    NOTE(review): despite its name this function only runs inference — no
    training ops are built. Per-clip scores, video ids and labels are saved
    under ./result/ and summarized with topk().
    """
    pre_model_save_dir = './models/4GPU_sgd0block_i3d_600000_8_64_0.0001_decay'
    # video_path_list = np.load('./data_list/test_data_list.npy')
    # id_list = np.load('./data_list/test_id_list.npy')
    # labels = np.load('./data_list/test_label_list.npy')
    video_path_list = np.load('./data_list/test_data_list_400_3times.npy')
    id_list = np.load('./data_list/test_id_list_400_3times.npy')
    labels = np.load('./data_list/test_label_list_400_3times.npy')
    position_list = np.load('./data_list/test_position_list_400_3times.npy')
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Single epoch, no shuffling: every clip is scored exactly once.
        train_input_queue = tf.train.slice_input_producer(
            [video_path_list, id_list, position_list], num_epochs=1,
            shuffle=False)
        video_path = train_input_queue[0]
        train_ids = train_input_queue[1]
        position = train_input_queue[2]
        # BUG FIX: the data module is imported as `input_test_data` at the
        # top of this file; the original referenced an undefined `input_data`.
        rgb_train_images, _, _ = tf.py_func(
            func=input_test_data.get_frames,
            inp=[video_path[0], video_path[1], FLAGS.num_frame_per_clib,
                 FLAGS.crop_size, FLAGS.sample_rate, False, position],
            Tout=[tf.float32, tf.double, tf.int64],
        )
        # rgb_train_images = Normalization(rgb_train_images, FLAGS.num_frame_per_clib/8)
        batch_videos, batch_ids = tf.train.batch(
            [rgb_train_images, train_ids],
            batch_size=FLAGS.batch_size * gpu_num,
            capacity=200,
            num_threads=20,
            shapes=[(FLAGS.num_frame_per_clib / FLAGS.sample_rate,
                     FLAGS.crop_size, FLAGS.crop_size, 3), ()])
        norm_score = []
        with tf.variable_scope(tf.get_variable_scope()):
            # Slice the batch across GPUs; weights are shared via variable
            # reuse after the first tower is built.
            for gpu_index in range(0, gpu_num):
                with tf.device('/gpu:%d' % gpu_index):
                    with tf.name_scope('GPU_%d' % gpu_index) as scope:
                        rgb_logit, _ = InceptionI3d(
                            num_classes=FLAGS.classics,
                            spatial_squeeze=True,
                            final_endpoint='Logits',
                            block_num=FLAGS.block_num
                        )(batch_videos[gpu_index * FLAGS.batch_size:
                                       (gpu_index + 1) * FLAGS.batch_size,
                                       :, :, :, :], False)
                        tf.get_variable_scope().reuse_variables()
                        # BUG FIX: `predictions` was never defined in the
                        # original. NOTE(review): softmax of the logits is
                        # assumed here (the list is named norm_score) —
                        # confirm against the training script.
                        predictions = tf.nn.softmax(rgb_logit)
                        norm_score.append(predictions)
        norm_score = tf.concat(norm_score, 0)
        # Create a saver for restoring the pre-trained checkpoint.
        saver = tf.train.Saver()
        # Create a session for running Ops on the Graph.
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(pre_model_save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("loading checkpoint %s,waiting......" % ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("load complete!")
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)
        predicts = []
        ids = []
        print("Start! test begin......queune and network init, this will take a few minutes, please wait..........")
        try:
            while not coord.should_stop():
                start_time = time.time()
                predict, video_id = sess.run([norm_score, batch_ids])
                predicts.extend(predict)
                ids.extend(video_id)
                duration = time.time() - start_time
                print('Test_step: %d/%d , time use: %.3f' % (video_id[-1], len(labels), duration))
        except tf.errors.OutOfRangeError:
            # The single-epoch input queue is exhausted: evaluation is done.
            print("Test done! kill all the threads....")
        finally:
            coord.request_stop()
            print('all threads are asked to stop!')
        coord.join(threads)
        np.save('./result/predicts.npy', predicts)
        np.save('./result/ids.npy', ids)
        np.save('./result/labels.npy', labels)
        topk(predicts, labels, ids)
        print('done!')
def main(_):
    """Entry point invoked by tf.app.run(); delegates to the evaluation."""
    run_training()


if __name__ == '__main__':
    tf.app.run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.